repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
PlaidWeb/Pushl | pushl/webmentions.py | get_target | python | async def get_target(config, url):
previous = config.cache.get(
'target', url, schema_version=SCHEMA_VERSION) if config.cache else None
headers = previous.caching if previous else None
request = await utils.retry_get(config, url, headers=headers)
if not request or not request.success:
return previous
if request.cached:
return previous
current = Target(request)
if config.cache:
config.cache.set('target', url, current)
return current | Given a URL, get the webmention endpoint | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/webmentions.py#L160-L180 | [
"async def retry_get(config, url, *args, **kwargs):\n \"\"\" aiohttp wrapper for GET \"\"\"\n return await _retry_do(config.session.get, url, *args,\n **_make_headers(config, kwargs))\n"
] | """ Functions for sending webmentions """
from abc import ABC, abstractmethod
import asyncio
import logging
import urllib.parse
from bs4 import BeautifulSoup
from lxml import etree
import async_lru
from . import caching, utils
LOGGER = logging.getLogger(__name__)
SCHEMA_VERSION = 3
class Endpoint(ABC):
""" Base class for target endpoints """
# pylint:disable=too-few-public-methods
def __init__(self, endpoint):
self.endpoint = endpoint
@abstractmethod
async def send(self, config, entry, target):
""" Send the mention via this protocol """
class WebmentionEndpoint(Endpoint):
""" Implementation of the webmention protocol """
# pylint:disable=too-few-public-methods
async def send(self, config, entry, target):
LOGGER.info("Sending Webmention %s -> %s", entry, target)
retries = 5
while retries > 0:
request = await utils.retry_post(config,
self.endpoint,
data={'source': entry,
'target': target
})
if request and 'retry-after' in request.headers:
retries -= 1
LOGGER.info("%s: retrying after %s seconds",
self.endpoint, request.headers['retry-after'])
asyncio.sleep(float(request.headers['retry-after']))
else:
return request and request.success
LOGGER.info("%s: no more retries", self.endpoint)
return False
class PingbackEndpoint(Endpoint):
""" Implementation of the pingback protocol """
# pylint:disable=too-few-public-methods
@staticmethod
def _make_param(text):
param = etree.Element('param')
value = etree.Element('value')
leaf = etree.Element('string')
leaf.text = text
value.append(leaf)
param.append(value)
return param
async def send(self, config, entry, target):
LOGGER.info("Sending Pingback %s -> %s", entry, target)
root = etree.Element('methodCall')
method = etree.Element('methodName')
method.text = 'pingback.ping'
root.append(method)
params = etree.Element('params')
root.append(params)
params.append(self._make_param(entry))
params.append(self._make_param(target))
body = etree.tostring(root,
xml_declaration=True)
request = await utils.retry_post(config,
self.endpoint,
data=body)
if not request:
LOGGER.info("%s: failed to send ping")
return False
if not request.success:
LOGGER.info("%s -> %s: Got status code %d",
entry, target, request.status)
# someday I'll parse out the response but IDGAF
return request.success
class Target:
""" A target of a webmention """
# pylint:disable=too-few-public-methods
def __init__(self, request):
self.url = str(request.url) # the canonical, final URL
self.status = request.status
self.caching = caching.make_headers(request.headers)
self.schema = SCHEMA_VERSION
if request.success and not request.cached:
self.endpoint = self._get_endpoint(request, request.text)
else:
self.endpoint = None
def _get_endpoint(self, request, text):
def join(url):
return urllib.parse.urljoin(self.url, str(url))
for rel, link in request.links.items():
if link.get('url') and 'webmention' in rel.split():
return WebmentionEndpoint(join(link.get('url')))
if 'X-Pingback' in request.headers:
return PingbackEndpoint(join(request.headers['X-Pingback']))
# Don't try to get a link tag out of a non-text document
ctype = request.headers.get('content-type')
if 'html' not in ctype and 'xml' not in ctype:
return None
soup = BeautifulSoup(text, 'html.parser')
for link in soup.find_all(('link', 'a'), rel='webmention'):
if link.attrs.get('href'):
return WebmentionEndpoint(
urllib.parse.urljoin(self.url,
link.attrs['href']))
for link in soup.find_all(('link', 'a'), rel='pingback'):
if link.attrs.get('href'):
return PingbackEndpoint(
urllib.parse.urljoin(self.url,
link.attrs['href']))
return None
async def send(self, config, entry):
""" Send a webmention to this target from the specified entry """
if self.endpoint:
LOGGER.debug("%s -> %s", entry.url, self.url)
try:
await self.endpoint.send(config, entry.url, self.url)
except Exception as err: # pylint:disable=broad-except
LOGGER.warning("Ping %s: got %s: %s",
self.url, err.__class__.__name__, err)
@async_lru.alru_cache(maxsize=1000)
|
PlaidWeb/Pushl | pushl/webmentions.py | Target.send | python | async def send(self, config, entry):
if self.endpoint:
LOGGER.debug("%s -> %s", entry.url, self.url)
try:
await self.endpoint.send(config, entry.url, self.url)
except Exception as err: # pylint:disable=broad-except
LOGGER.warning("Ping %s: got %s: %s",
self.url, err.__class__.__name__, err) | Send a webmention to this target from the specified entry | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/webmentions.py#L148-L156 | null | class Target:
""" A target of a webmention """
# pylint:disable=too-few-public-methods
def __init__(self, request):
self.url = str(request.url) # the canonical, final URL
self.status = request.status
self.caching = caching.make_headers(request.headers)
self.schema = SCHEMA_VERSION
if request.success and not request.cached:
self.endpoint = self._get_endpoint(request, request.text)
else:
self.endpoint = None
def _get_endpoint(self, request, text):
def join(url):
return urllib.parse.urljoin(self.url, str(url))
for rel, link in request.links.items():
if link.get('url') and 'webmention' in rel.split():
return WebmentionEndpoint(join(link.get('url')))
if 'X-Pingback' in request.headers:
return PingbackEndpoint(join(request.headers['X-Pingback']))
# Don't try to get a link tag out of a non-text document
ctype = request.headers.get('content-type')
if 'html' not in ctype and 'xml' not in ctype:
return None
soup = BeautifulSoup(text, 'html.parser')
for link in soup.find_all(('link', 'a'), rel='webmention'):
if link.attrs.get('href'):
return WebmentionEndpoint(
urllib.parse.urljoin(self.url,
link.attrs['href']))
for link in soup.find_all(('link', 'a'), rel='pingback'):
if link.attrs.get('href'):
return PingbackEndpoint(
urllib.parse.urljoin(self.url,
link.attrs['href']))
return None
|
PlaidWeb/Pushl | pushl/entries.py | get_entry | python | async def get_entry(config, url):
previous = config.cache.get(
'entry', url,
schema_version=SCHEMA_VERSION) if config.cache else None
headers = previous.caching if previous else None
request = await utils.retry_get(config, url, headers=headers)
if not request or not request.success:
LOGGER.error("Could not get entry %s: %d", url,
request.status if request else -1)
return None, previous, False
# cache hit
if request.cached:
return previous, previous, False
current = Entry(request)
# Content updated
if config.cache:
config.cache.set('entry', url, current)
return current, previous, (not previous
or previous.digest != current.digest
or previous.status != current.status) | Given an entry URL, return the entry
Arguments:
config -- the configuration
url -- the URL of the entry
Returns: 3-tuple of (current, previous, updated) | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/entries.py#L108-L142 | [
"async def retry_get(config, url, *args, **kwargs):\n \"\"\" aiohttp wrapper for GET \"\"\"\n return await _retry_do(config.session.get, url, *args,\n **_make_headers(config, kwargs))\n"
] | """ Functions for handling entries """
import logging
import urllib.parse
import hashlib
from bs4 import BeautifulSoup
from . import caching, utils
LOGGER = logging.getLogger(__name__)
SCHEMA_VERSION = 3
class Entry:
""" Encapsulates a scanned entry """
# pylint:disable=too-few-public-methods
def __init__(self, request):
""" Build an Entry from a completed request """
text = request.text
md5 = hashlib.md5(text.encode('utf-8'))
self.digest = md5.digest()
self.url = str(request.url) # the canonical, final URL
self.status = request.status
self.caching = caching.make_headers(request.headers)
if 200 <= self.status < 300:
# We have new content, so parse out the relevant stuff
soup = BeautifulSoup(text, 'html.parser')
articles = self._get_articles(soup)
self._targets = []
for node in articles:
self._targets += [link.attrs
for link in node.find_all('a')
if 'href' in link.attrs]
self.feeds = [urllib.parse.urljoin(self.url, link.attrs['href'])
for link in soup.find_all('link')
if 'href' in link.attrs
and 'type' in link.attrs
and link.attrs['type'] in ('application/rss.xml',
'application/atom+xml')]
else:
self._targets = []
self.feeds = []
self.schema = SCHEMA_VERSION
@staticmethod
def _get_articles(soup):
return (soup.find_all(class_="h-entry")
or soup.find_all("article")
or soup.find_all(class_="entry")
or [soup])
@staticmethod
def _check_rel(attrs, rel_whitelist, rel_blacklist):
""" Check a link's relations against the whitelist or blacklist.
First, this will reject based on blacklist.
Next, if there is a whitelist, there must be at least one rel that matches.
To explicitly allow links without a rel you can add None to the whitelist
(e.g. ['in-reply-to',None])
"""
rels = attrs.get('rel', [None])
if rel_blacklist:
# Never return True for a link whose rel appears in the blacklist
for rel in rels:
if rel in rel_blacklist:
return False
if rel_whitelist:
# If there is a whitelist for rels, only return true for a rel that
# appears in it
for rel in rels:
if rel in rel_whitelist:
return True
# If there is a whitelist and we don't match, then reject
return False
return True
def _domain_differs(self, href):
""" Check that a link is not on the same domain as the source URL """
target = utils.get_domain(href)
if not target:
return False
origin = utils.get_domain(self.url)
return target != origin
def get_targets(self, config):
""" Given an Entry object, return all of the outgoing links. """
return {urllib.parse.urljoin(self.url, attrs['href'])
for attrs in self._targets
if self._check_rel(attrs, config.rel_whitelist, config.rel_blacklist)
and self._domain_differs(attrs['href'])}
|
PlaidWeb/Pushl | pushl/entries.py | Entry._check_rel | python | def _check_rel(attrs, rel_whitelist, rel_blacklist):
rels = attrs.get('rel', [None])
if rel_blacklist:
# Never return True for a link whose rel appears in the blacklist
for rel in rels:
if rel in rel_blacklist:
return False
if rel_whitelist:
# If there is a whitelist for rels, only return true for a rel that
# appears in it
for rel in rels:
if rel in rel_whitelist:
return True
# If there is a whitelist and we don't match, then reject
return False
return True | Check a link's relations against the whitelist or blacklist.
First, this will reject based on blacklist.
Next, if there is a whitelist, there must be at least one rel that matches.
To explicitly allow links without a rel you can add None to the whitelist
(e.g. ['in-reply-to',None]) | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/entries.py#L61-L88 | null | class Entry:
""" Encapsulates a scanned entry """
# pylint:disable=too-few-public-methods
def __init__(self, request):
""" Build an Entry from a completed request """
text = request.text
md5 = hashlib.md5(text.encode('utf-8'))
self.digest = md5.digest()
self.url = str(request.url) # the canonical, final URL
self.status = request.status
self.caching = caching.make_headers(request.headers)
if 200 <= self.status < 300:
# We have new content, so parse out the relevant stuff
soup = BeautifulSoup(text, 'html.parser')
articles = self._get_articles(soup)
self._targets = []
for node in articles:
self._targets += [link.attrs
for link in node.find_all('a')
if 'href' in link.attrs]
self.feeds = [urllib.parse.urljoin(self.url, link.attrs['href'])
for link in soup.find_all('link')
if 'href' in link.attrs
and 'type' in link.attrs
and link.attrs['type'] in ('application/rss.xml',
'application/atom+xml')]
else:
self._targets = []
self.feeds = []
self.schema = SCHEMA_VERSION
@staticmethod
def _get_articles(soup):
return (soup.find_all(class_="h-entry")
or soup.find_all("article")
or soup.find_all(class_="entry")
or [soup])
@staticmethod
def _domain_differs(self, href):
""" Check that a link is not on the same domain as the source URL """
target = utils.get_domain(href)
if not target:
return False
origin = utils.get_domain(self.url)
return target != origin
def get_targets(self, config):
""" Given an Entry object, return all of the outgoing links. """
return {urllib.parse.urljoin(self.url, attrs['href'])
for attrs in self._targets
if self._check_rel(attrs, config.rel_whitelist, config.rel_blacklist)
and self._domain_differs(attrs['href'])}
|
PlaidWeb/Pushl | pushl/entries.py | Entry._domain_differs | python | def _domain_differs(self, href):
target = utils.get_domain(href)
if not target:
return False
origin = utils.get_domain(self.url)
return target != origin | Check that a link is not on the same domain as the source URL | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/entries.py#L90-L97 | null | class Entry:
""" Encapsulates a scanned entry """
# pylint:disable=too-few-public-methods
def __init__(self, request):
""" Build an Entry from a completed request """
text = request.text
md5 = hashlib.md5(text.encode('utf-8'))
self.digest = md5.digest()
self.url = str(request.url) # the canonical, final URL
self.status = request.status
self.caching = caching.make_headers(request.headers)
if 200 <= self.status < 300:
# We have new content, so parse out the relevant stuff
soup = BeautifulSoup(text, 'html.parser')
articles = self._get_articles(soup)
self._targets = []
for node in articles:
self._targets += [link.attrs
for link in node.find_all('a')
if 'href' in link.attrs]
self.feeds = [urllib.parse.urljoin(self.url, link.attrs['href'])
for link in soup.find_all('link')
if 'href' in link.attrs
and 'type' in link.attrs
and link.attrs['type'] in ('application/rss.xml',
'application/atom+xml')]
else:
self._targets = []
self.feeds = []
self.schema = SCHEMA_VERSION
@staticmethod
def _get_articles(soup):
return (soup.find_all(class_="h-entry")
or soup.find_all("article")
or soup.find_all(class_="entry")
or [soup])
@staticmethod
def _check_rel(attrs, rel_whitelist, rel_blacklist):
""" Check a link's relations against the whitelist or blacklist.
First, this will reject based on blacklist.
Next, if there is a whitelist, there must be at least one rel that matches.
To explicitly allow links without a rel you can add None to the whitelist
(e.g. ['in-reply-to',None])
"""
rels = attrs.get('rel', [None])
if rel_blacklist:
# Never return True for a link whose rel appears in the blacklist
for rel in rels:
if rel in rel_blacklist:
return False
if rel_whitelist:
# If there is a whitelist for rels, only return true for a rel that
# appears in it
for rel in rels:
if rel in rel_whitelist:
return True
# If there is a whitelist and we don't match, then reject
return False
return True
def get_targets(self, config):
""" Given an Entry object, return all of the outgoing links. """
return {urllib.parse.urljoin(self.url, attrs['href'])
for attrs in self._targets
if self._check_rel(attrs, config.rel_whitelist, config.rel_blacklist)
and self._domain_differs(attrs['href'])}
|
PlaidWeb/Pushl | pushl/entries.py | Entry.get_targets | python | def get_targets(self, config):
return {urllib.parse.urljoin(self.url, attrs['href'])
for attrs in self._targets
if self._check_rel(attrs, config.rel_whitelist, config.rel_blacklist)
and self._domain_differs(attrs['href'])} | Given an Entry object, return all of the outgoing links. | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/entries.py#L99-L105 | null | class Entry:
""" Encapsulates a scanned entry """
# pylint:disable=too-few-public-methods
def __init__(self, request):
""" Build an Entry from a completed request """
text = request.text
md5 = hashlib.md5(text.encode('utf-8'))
self.digest = md5.digest()
self.url = str(request.url) # the canonical, final URL
self.status = request.status
self.caching = caching.make_headers(request.headers)
if 200 <= self.status < 300:
# We have new content, so parse out the relevant stuff
soup = BeautifulSoup(text, 'html.parser')
articles = self._get_articles(soup)
self._targets = []
for node in articles:
self._targets += [link.attrs
for link in node.find_all('a')
if 'href' in link.attrs]
self.feeds = [urllib.parse.urljoin(self.url, link.attrs['href'])
for link in soup.find_all('link')
if 'href' in link.attrs
and 'type' in link.attrs
and link.attrs['type'] in ('application/rss.xml',
'application/atom+xml')]
else:
self._targets = []
self.feeds = []
self.schema = SCHEMA_VERSION
@staticmethod
def _get_articles(soup):
return (soup.find_all(class_="h-entry")
or soup.find_all("article")
or soup.find_all(class_="entry")
or [soup])
@staticmethod
def _check_rel(attrs, rel_whitelist, rel_blacklist):
""" Check a link's relations against the whitelist or blacklist.
First, this will reject based on blacklist.
Next, if there is a whitelist, there must be at least one rel that matches.
To explicitly allow links without a rel you can add None to the whitelist
(e.g. ['in-reply-to',None])
"""
rels = attrs.get('rel', [None])
if rel_blacklist:
# Never return True for a link whose rel appears in the blacklist
for rel in rels:
if rel in rel_blacklist:
return False
if rel_whitelist:
# If there is a whitelist for rels, only return true for a rel that
# appears in it
for rel in rels:
if rel in rel_whitelist:
return True
# If there is a whitelist and we don't match, then reject
return False
return True
def _domain_differs(self, href):
""" Check that a link is not on the same domain as the source URL """
target = utils.get_domain(href)
if not target:
return False
origin = utils.get_domain(self.url)
return target != origin
|
PlaidWeb/Pushl | pushl/utils.py | guess_encoding | python | def guess_encoding(request):
ctype = request.headers.get('content-type')
if not ctype:
# we don't have a content-type, somehow, so...
LOGGER.warning("%s: no content-type; headers are %s",
request.url, request.headers)
return 'utf-8'
# explicit declaration
match = re.search(r'charset=([^ ;]*)(;| |$)', ctype)
if match:
return match[1]
# html default
if ctype.startswith('text/html'):
return 'iso-8859-1'
# everything else's default
return 'utf-8' | Try to guess the encoding of a request without going through the slow chardet process | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/utils.py#L15-L34 | null | """ Utility functions """
import re
import logging
import sys
import asyncio
import urllib.parse
import ssl
import aiohttp
LOGGER = logging.getLogger('utils')
def get_domain(url):
""" Get the domain part of a URL """
return urllib.parse.urlparse(url).netloc.lower()
class RequestResult:
""" The results we need from a request """
def __init__(self, request, data):
self.url = request.url
self.headers = request.headers
self.status = request.status
self.links = request.links
if data:
self.text = data.decode(guess_encoding(request), 'ignore')
else:
self.text = ''
@property
def success(self):
""" Was this request successful? """
return 200 <= self.status < 300 or self.cached or self.gone
@property
def gone(self):
""" Is this request for a deleted resource? """
return self.status == 410
@property
def cached(self):
""" Is this request for a cache hit? """
return self.status == 304
async def _retry_do(func, url, *args, **kwargs):
errors = set()
for retries in range(5):
try:
async with func(url, *args, **kwargs) as request:
if request.status == 304:
return RequestResult(request, None)
return RequestResult(request, await request.read())
except aiohttp.client_exceptions.ClientResponseError as err:
LOGGER.warning("%s: got client response error: %s", url, str(err))
return None
except ssl.SSLError as err:
LOGGER.warning(
"%s: SSL error: %s", url, str(err))
return None
except Exception: # pylint:disable=broad-except
exc_type, exc_value, _ = sys.exc_info()
LOGGER.debug("%s: got error %s %s (retry=%d)", url,
exc_type, exc_value, retries)
errors.add(str(exc_value))
await asyncio.sleep(retries)
LOGGER.warning("%s: Exceeded maximum retries; errors: %s", url, errors)
return None
def _make_headers(config, kwargs):
""" Replace the kwargs with one where the headers include our user-agent """
headers = kwargs.get('headers')
headers = headers.copy() if headers is not None else {}
headers['User-Agent'] = config.args.user_agent
kwargs = kwargs.copy()
kwargs['headers'] = headers
return kwargs
async def retry_get(config, url, *args, **kwargs):
""" aiohttp wrapper for GET """
return await _retry_do(config.session.get, url, *args,
**_make_headers(config, kwargs))
async def retry_post(config, url, *args, **kwargs):
""" aiohttp wrapper for POST """
return await _retry_do(config.session.post, url, *args,
**_make_headers(config, kwargs))
|
PlaidWeb/Pushl | pushl/utils.py | _make_headers | python | def _make_headers(config, kwargs):
headers = kwargs.get('headers')
headers = headers.copy() if headers is not None else {}
headers['User-Agent'] = config.args.user_agent
kwargs = kwargs.copy()
kwargs['headers'] = headers
return kwargs | Replace the kwargs with one where the headers include our user-agent | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/utils.py#L97-L106 | null | """ Utility functions """
import re
import logging
import sys
import asyncio
import urllib.parse
import ssl
import aiohttp
LOGGER = logging.getLogger('utils')
def guess_encoding(request):
""" Try to guess the encoding of a request without going through the slow chardet process"""
ctype = request.headers.get('content-type')
if not ctype:
# we don't have a content-type, somehow, so...
LOGGER.warning("%s: no content-type; headers are %s",
request.url, request.headers)
return 'utf-8'
# explicit declaration
match = re.search(r'charset=([^ ;]*)(;| |$)', ctype)
if match:
return match[1]
# html default
if ctype.startswith('text/html'):
return 'iso-8859-1'
# everything else's default
return 'utf-8'
def get_domain(url):
""" Get the domain part of a URL """
return urllib.parse.urlparse(url).netloc.lower()
class RequestResult:
""" The results we need from a request """
def __init__(self, request, data):
self.url = request.url
self.headers = request.headers
self.status = request.status
self.links = request.links
if data:
self.text = data.decode(guess_encoding(request), 'ignore')
else:
self.text = ''
@property
def success(self):
""" Was this request successful? """
return 200 <= self.status < 300 or self.cached or self.gone
@property
def gone(self):
""" Is this request for a deleted resource? """
return self.status == 410
@property
def cached(self):
""" Is this request for a cache hit? """
return self.status == 304
async def _retry_do(func, url, *args, **kwargs):
errors = set()
for retries in range(5):
try:
async with func(url, *args, **kwargs) as request:
if request.status == 304:
return RequestResult(request, None)
return RequestResult(request, await request.read())
except aiohttp.client_exceptions.ClientResponseError as err:
LOGGER.warning("%s: got client response error: %s", url, str(err))
return None
except ssl.SSLError as err:
LOGGER.warning(
"%s: SSL error: %s", url, str(err))
return None
except Exception: # pylint:disable=broad-except
exc_type, exc_value, _ = sys.exc_info()
LOGGER.debug("%s: got error %s %s (retry=%d)", url,
exc_type, exc_value, retries)
errors.add(str(exc_value))
await asyncio.sleep(retries)
LOGGER.warning("%s: Exceeded maximum retries; errors: %s", url, errors)
return None
async def retry_get(config, url, *args, **kwargs):
""" aiohttp wrapper for GET """
return await _retry_do(config.session.get, url, *args,
**_make_headers(config, kwargs))
async def retry_post(config, url, *args, **kwargs):
""" aiohttp wrapper for POST """
return await _retry_do(config.session.post, url, *args,
**_make_headers(config, kwargs))
|
PlaidWeb/Pushl | pushl/utils.py | retry_get | python | async def retry_get(config, url, *args, **kwargs):
return await _retry_do(config.session.get, url, *args,
**_make_headers(config, kwargs)) | aiohttp wrapper for GET | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/utils.py#L109-L112 | [
"async def _retry_do(func, url, *args, **kwargs):\n errors = set()\n for retries in range(5):\n try:\n async with func(url, *args, **kwargs) as request:\n if request.status == 304:\n return RequestResult(request, None)\n return RequestResult(r... | """ Utility functions """
import re
import logging
import sys
import asyncio
import urllib.parse
import ssl
import aiohttp
LOGGER = logging.getLogger('utils')
def guess_encoding(request):
""" Try to guess the encoding of a request without going through the slow chardet process"""
ctype = request.headers.get('content-type')
if not ctype:
# we don't have a content-type, somehow, so...
LOGGER.warning("%s: no content-type; headers are %s",
request.url, request.headers)
return 'utf-8'
# explicit declaration
match = re.search(r'charset=([^ ;]*)(;| |$)', ctype)
if match:
return match[1]
# html default
if ctype.startswith('text/html'):
return 'iso-8859-1'
# everything else's default
return 'utf-8'
def get_domain(url):
""" Get the domain part of a URL """
return urllib.parse.urlparse(url).netloc.lower()
class RequestResult:
""" The results we need from a request """
def __init__(self, request, data):
self.url = request.url
self.headers = request.headers
self.status = request.status
self.links = request.links
if data:
self.text = data.decode(guess_encoding(request), 'ignore')
else:
self.text = ''
@property
def success(self):
""" Was this request successful? """
return 200 <= self.status < 300 or self.cached or self.gone
@property
def gone(self):
""" Is this request for a deleted resource? """
return self.status == 410
@property
def cached(self):
""" Is this request for a cache hit? """
return self.status == 304
async def _retry_do(func, url, *args, **kwargs):
errors = set()
for retries in range(5):
try:
async with func(url, *args, **kwargs) as request:
if request.status == 304:
return RequestResult(request, None)
return RequestResult(request, await request.read())
except aiohttp.client_exceptions.ClientResponseError as err:
LOGGER.warning("%s: got client response error: %s", url, str(err))
return None
except ssl.SSLError as err:
LOGGER.warning(
"%s: SSL error: %s", url, str(err))
return None
except Exception: # pylint:disable=broad-except
exc_type, exc_value, _ = sys.exc_info()
LOGGER.debug("%s: got error %s %s (retry=%d)", url,
exc_type, exc_value, retries)
errors.add(str(exc_value))
await asyncio.sleep(retries)
LOGGER.warning("%s: Exceeded maximum retries; errors: %s", url, errors)
return None
def _make_headers(config, kwargs):
""" Replace the kwargs with one where the headers include our user-agent """
headers = kwargs.get('headers')
headers = headers.copy() if headers is not None else {}
headers['User-Agent'] = config.args.user_agent
kwargs = kwargs.copy()
kwargs['headers'] = headers
return kwargs
async def retry_post(config, url, *args, **kwargs):
""" aiohttp wrapper for POST """
return await _retry_do(config.session.post, url, *args,
**_make_headers(config, kwargs))
|
PlaidWeb/Pushl | pushl/utils.py | retry_post | python | async def retry_post(config, url, *args, **kwargs):
return await _retry_do(config.session.post, url, *args,
**_make_headers(config, kwargs)) | aiohttp wrapper for POST | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/utils.py#L115-L118 | [
"async def _retry_do(func, url, *args, **kwargs):\n errors = set()\n for retries in range(5):\n try:\n async with func(url, *args, **kwargs) as request:\n if request.status == 304:\n return RequestResult(request, None)\n return RequestResult(r... | """ Utility functions """
import re
import logging
import sys
import asyncio
import urllib.parse
import ssl
import aiohttp
LOGGER = logging.getLogger('utils')
def guess_encoding(request):
""" Try to guess the encoding of a request without going through the slow chardet process"""
ctype = request.headers.get('content-type')
if not ctype:
# we don't have a content-type, somehow, so...
LOGGER.warning("%s: no content-type; headers are %s",
request.url, request.headers)
return 'utf-8'
# explicit declaration
match = re.search(r'charset=([^ ;]*)(;| |$)', ctype)
if match:
return match[1]
# html default
if ctype.startswith('text/html'):
return 'iso-8859-1'
# everything else's default
return 'utf-8'
def get_domain(url):
""" Get the domain part of a URL """
return urllib.parse.urlparse(url).netloc.lower()
class RequestResult:
""" The results we need from a request """
def __init__(self, request, data):
self.url = request.url
self.headers = request.headers
self.status = request.status
self.links = request.links
if data:
self.text = data.decode(guess_encoding(request), 'ignore')
else:
self.text = ''
@property
def success(self):
""" Was this request successful? """
return 200 <= self.status < 300 or self.cached or self.gone
@property
def gone(self):
""" Is this request for a deleted resource? """
return self.status == 410
@property
def cached(self):
""" Is this request for a cache hit? """
return self.status == 304
async def _retry_do(func, url, *args, **kwargs):
errors = set()
for retries in range(5):
try:
async with func(url, *args, **kwargs) as request:
if request.status == 304:
return RequestResult(request, None)
return RequestResult(request, await request.read())
except aiohttp.client_exceptions.ClientResponseError as err:
LOGGER.warning("%s: got client response error: %s", url, str(err))
return None
except ssl.SSLError as err:
LOGGER.warning(
"%s: SSL error: %s", url, str(err))
return None
except Exception: # pylint:disable=broad-except
exc_type, exc_value, _ = sys.exc_info()
LOGGER.debug("%s: got error %s %s (retry=%d)", url,
exc_type, exc_value, retries)
errors.add(str(exc_value))
await asyncio.sleep(retries)
LOGGER.warning("%s: Exceeded maximum retries; errors: %s", url, errors)
return None
def _make_headers(config, kwargs):
""" Replace the kwargs with one where the headers include our user-agent """
headers = kwargs.get('headers')
headers = headers.copy() if headers is not None else {}
headers['User-Agent'] = config.args.user_agent
kwargs = kwargs.copy()
kwargs['headers'] = headers
return kwargs
async def retry_get(config, url, *args, **kwargs):
""" aiohttp wrapper for GET """
return await _retry_do(config.session.get, url, *args,
**_make_headers(config, kwargs))
|
biocore/deblur | deblur/parallel_deblur.py | deblur_system_call | python | def deblur_system_call(params, input_fp):
logger = logging.getLogger(__name__)
logger.debug('[%s] deblur system call params %s, input_fp %s' %
(mp.current_process().name, params, input_fp))
# construct command
script_name = "deblur"
script_subprogram = "workflow"
command = [script_name,
script_subprogram,
'--seqs-fp', input_fp,
'--is-worker-thread',
'--keep-tmp-files']
command.extend(params)
logger.debug('[%s] running command %s' % (mp.current_process().name,
command))
return _system_call(command) | Build deblur command for subprocess.
Parameters
----------
params: list of str
parameter settings to pass to deblur CLI
input_fp : str
name of the input fasta file to deblur
Returns
-------
stdout: string
process output directed to standard output
stderr: string
process output directed to standard error
return_value: integer
return code from process | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/parallel_deblur.py#L17-L53 | [
"def _system_call(cmd, stdoutfilename=None):\n \"\"\"Execute the command `cmd`\n Parameters\n ----------\n cmd : str\n The string containing the command to be run.\n stdoutfilename : str\n Name of the file to save stdout to or None\n (default) to not save to file\n stderrfilen... | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from .workflow import _system_call
import multiprocessing as mp
import traceback
import sys
from functools import partial
import logging
def run_functor(functor, *args, **kwargs):
"""
Given a functor, run it and return its result. We can use this with
multiprocessing.map and map it over a list of job functors to do them.
Handles getting more than multiprocessing's pitiful exception output
This function was derived from:
http://stackoverflow.com/a/16618842/19741
This code was adopted from the American Gut project:
https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py
"""
try:
# This is where you do your actual work
return functor(*args, **kwargs)
except Exception:
# Put all exception text into an exception and raise that
raise Exception("".join(traceback.format_exception(*sys.exc_info())))
def parallel_deblur(inputs, params,
pos_ref_db_fp, neg_ref_dp_fp, jobs_to_start=1):
"""Dispatch execution over a pool of processors
This code was adopted from the American Gut project:
https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py
Parameters
----------
inputs : iterable of str
File paths to input per-sample sequence files
params : list of str
list of CLI parameters supplied to the deblur workflow
(argv - first 2 are 'deblur','workflow' and are ignored)
pos_ref_db_fp : list of str
the indexed positive (16s) sortmerna database
(created in the main thread)
neg_ref_db_fp : list of str
the indexed negative (artifacts) sortmerna database
(created in the main thread)
jobs_to_start : int, optional
The number of processors on the local system to use
Returns
-------
all_result_paths : list
list of expected output files
"""
logger = logging.getLogger(__name__)
logger.info('parallel deblur started for %d inputs' % len(inputs))
# remove the irrelevant parameters
remove_param_list = ['-O', '--jobs-to-start', '--seqs-fp',
'--pos-ref-db-fp', '--neg-ref-db-fp']
skipnext = False
newparams = []
for carg in params[2:]:
if skipnext:
skipnext = False
continue
if carg in remove_param_list:
skipnext = True
continue
newparams.append(carg)
# add the ref_db_fp (since it may be not present in the
# original command parameters)
if pos_ref_db_fp:
new_pos_ref_db_fp = ','.join(pos_ref_db_fp)
newparams.append('--pos-ref-db-fp')
newparams.append(new_pos_ref_db_fp)
if neg_ref_dp_fp:
new_neg_ref_db_fp = ','.join(neg_ref_dp_fp)
newparams.append('--neg-ref-db-fp')
newparams.append(new_neg_ref_db_fp)
logger.debug('ready for functor %s' % newparams)
functor = partial(run_functor, deblur_system_call, newparams)
logger.debug('ready for pool %d jobs' % jobs_to_start)
pool = mp.Pool(processes=jobs_to_start)
logger.debug('almost running...')
for stdout, stderr, es in pool.map(functor, inputs):
if es != 0:
raise RuntimeError("stdout: %s\nstderr: %s\nexit: %d" % (stdout,
stderr,
es))
|
biocore/deblur | deblur/parallel_deblur.py | run_functor | python | def run_functor(functor, *args, **kwargs):
try:
# This is where you do your actual work
return functor(*args, **kwargs)
except Exception:
# Put all exception text into an exception and raise that
raise Exception("".join(traceback.format_exception(*sys.exc_info()))) | Given a functor, run it and return its result. We can use this with
multiprocessing.map and map it over a list of job functors to do them.
Handles getting more than multiprocessing's pitiful exception output
This function was derived from:
http://stackoverflow.com/a/16618842/19741
This code was adopted from the American Gut project:
https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/parallel_deblur.py#L56-L74 | null | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from .workflow import _system_call
import multiprocessing as mp
import traceback
import sys
from functools import partial
import logging
def deblur_system_call(params, input_fp):
"""Build deblur command for subprocess.
Parameters
----------
params: list of str
parameter settings to pass to deblur CLI
input_fp : str
name of the input fasta file to deblur
Returns
-------
stdout: string
process output directed to standard output
stderr: string
process output directed to standard error
return_value: integer
return code from process
"""
logger = logging.getLogger(__name__)
logger.debug('[%s] deblur system call params %s, input_fp %s' %
(mp.current_process().name, params, input_fp))
# construct command
script_name = "deblur"
script_subprogram = "workflow"
command = [script_name,
script_subprogram,
'--seqs-fp', input_fp,
'--is-worker-thread',
'--keep-tmp-files']
command.extend(params)
logger.debug('[%s] running command %s' % (mp.current_process().name,
command))
return _system_call(command)
def parallel_deblur(inputs, params,
pos_ref_db_fp, neg_ref_dp_fp, jobs_to_start=1):
"""Dispatch execution over a pool of processors
This code was adopted from the American Gut project:
https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py
Parameters
----------
inputs : iterable of str
File paths to input per-sample sequence files
params : list of str
list of CLI parameters supplied to the deblur workflow
(argv - first 2 are 'deblur','workflow' and are ignored)
pos_ref_db_fp : list of str
the indexed positive (16s) sortmerna database
(created in the main thread)
neg_ref_db_fp : list of str
the indexed negative (artifacts) sortmerna database
(created in the main thread)
jobs_to_start : int, optional
The number of processors on the local system to use
Returns
-------
all_result_paths : list
list of expected output files
"""
logger = logging.getLogger(__name__)
logger.info('parallel deblur started for %d inputs' % len(inputs))
# remove the irrelevant parameters
remove_param_list = ['-O', '--jobs-to-start', '--seqs-fp',
'--pos-ref-db-fp', '--neg-ref-db-fp']
skipnext = False
newparams = []
for carg in params[2:]:
if skipnext:
skipnext = False
continue
if carg in remove_param_list:
skipnext = True
continue
newparams.append(carg)
# add the ref_db_fp (since it may be not present in the
# original command parameters)
if pos_ref_db_fp:
new_pos_ref_db_fp = ','.join(pos_ref_db_fp)
newparams.append('--pos-ref-db-fp')
newparams.append(new_pos_ref_db_fp)
if neg_ref_dp_fp:
new_neg_ref_db_fp = ','.join(neg_ref_dp_fp)
newparams.append('--neg-ref-db-fp')
newparams.append(new_neg_ref_db_fp)
logger.debug('ready for functor %s' % newparams)
functor = partial(run_functor, deblur_system_call, newparams)
logger.debug('ready for pool %d jobs' % jobs_to_start)
pool = mp.Pool(processes=jobs_to_start)
logger.debug('almost running...')
for stdout, stderr, es in pool.map(functor, inputs):
if es != 0:
raise RuntimeError("stdout: %s\nstderr: %s\nexit: %d" % (stdout,
stderr,
es))
|
biocore/deblur | deblur/parallel_deblur.py | parallel_deblur | python | def parallel_deblur(inputs, params,
pos_ref_db_fp, neg_ref_dp_fp, jobs_to_start=1):
logger = logging.getLogger(__name__)
logger.info('parallel deblur started for %d inputs' % len(inputs))
# remove the irrelevant parameters
remove_param_list = ['-O', '--jobs-to-start', '--seqs-fp',
'--pos-ref-db-fp', '--neg-ref-db-fp']
skipnext = False
newparams = []
for carg in params[2:]:
if skipnext:
skipnext = False
continue
if carg in remove_param_list:
skipnext = True
continue
newparams.append(carg)
# add the ref_db_fp (since it may be not present in the
# original command parameters)
if pos_ref_db_fp:
new_pos_ref_db_fp = ','.join(pos_ref_db_fp)
newparams.append('--pos-ref-db-fp')
newparams.append(new_pos_ref_db_fp)
if neg_ref_dp_fp:
new_neg_ref_db_fp = ','.join(neg_ref_dp_fp)
newparams.append('--neg-ref-db-fp')
newparams.append(new_neg_ref_db_fp)
logger.debug('ready for functor %s' % newparams)
functor = partial(run_functor, deblur_system_call, newparams)
logger.debug('ready for pool %d jobs' % jobs_to_start)
pool = mp.Pool(processes=jobs_to_start)
logger.debug('almost running...')
for stdout, stderr, es in pool.map(functor, inputs):
if es != 0:
raise RuntimeError("stdout: %s\nstderr: %s\nexit: %d" % (stdout,
stderr,
es)) | Dispatch execution over a pool of processors
This code was adopted from the American Gut project:
https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py
Parameters
----------
inputs : iterable of str
File paths to input per-sample sequence files
params : list of str
list of CLI parameters supplied to the deblur workflow
(argv - first 2 are 'deblur','workflow' and are ignored)
pos_ref_db_fp : list of str
the indexed positive (16s) sortmerna database
(created in the main thread)
neg_ref_db_fp : list of str
the indexed negative (artifacts) sortmerna database
(created in the main thread)
jobs_to_start : int, optional
The number of processors on the local system to use
Returns
-------
all_result_paths : list
list of expected output files | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/parallel_deblur.py#L77-L142 | null | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from .workflow import _system_call
import multiprocessing as mp
import traceback
import sys
from functools import partial
import logging
def deblur_system_call(params, input_fp):
"""Build deblur command for subprocess.
Parameters
----------
params: list of str
parameter settings to pass to deblur CLI
input_fp : str
name of the input fasta file to deblur
Returns
-------
stdout: string
process output directed to standard output
stderr: string
process output directed to standard error
return_value: integer
return code from process
"""
logger = logging.getLogger(__name__)
logger.debug('[%s] deblur system call params %s, input_fp %s' %
(mp.current_process().name, params, input_fp))
# construct command
script_name = "deblur"
script_subprogram = "workflow"
command = [script_name,
script_subprogram,
'--seqs-fp', input_fp,
'--is-worker-thread',
'--keep-tmp-files']
command.extend(params)
logger.debug('[%s] running command %s' % (mp.current_process().name,
command))
return _system_call(command)
def run_functor(functor, *args, **kwargs):
"""
Given a functor, run it and return its result. We can use this with
multiprocessing.map and map it over a list of job functors to do them.
Handles getting more than multiprocessing's pitiful exception output
This function was derived from:
http://stackoverflow.com/a/16618842/19741
This code was adopted from the American Gut project:
https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py
"""
try:
# This is where you do your actual work
return functor(*args, **kwargs)
except Exception:
# Put all exception text into an exception and raise that
raise Exception("".join(traceback.format_exception(*sys.exc_info())))
|
biocore/deblur | deblur/workflow.py | sequence_generator | python | def sequence_generator(input_fp):
logger = logging.getLogger(__name__)
kw = {}
if sniff_fasta(input_fp)[0]:
format = 'fasta'
elif sniff_fastq(input_fp)[0]:
format = 'fastq'
kw['variant'] = _get_fastq_variant(input_fp)
else:
# usually happens when the fasta file is empty
# so need to return no sequences (and warn)
msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
logger.warn(msg)
warnings.warn(msg, UserWarning)
return
# some of the test code is using file paths, some is using StringIO.
if isinstance(input_fp, io.TextIOBase):
input_fp.seek(0)
for record in skbio.read(input_fp, format=format, **kw):
yield (record.metadata['id'], str(record)) | Yield (id, sequence) from an input file
Parameters
----------
input_fp : filepath
A filepath, which can be any valid fasta or fastq file within the
limitations of scikit-bio's IO registry.
Notes
-----
The use of this method is a stopgap to replicate the existing `parse_fasta`
functionality while at the same time allowing for fastq support.
Raises
------
skbio.io.FormatIdentificationWarning
If the format of the input file cannot be determined.
Returns
-------
(str, str)
The ID and sequence. | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L54-L100 | [
"def _get_fastq_variant(input_fp):\n # http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters\n variant = None\n variants = ['illumina1.8', 'illumina1.3', 'solexa', 'sanger']\n for v in variants:\n try:\n next(skbio.read(input_fp, format='fastq', varia... | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
# http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
variant = None
variants = ['illumina1.8', 'illumina1.3', 'solexa', 'sanger']
for v in variants:
try:
next(skbio.read(input_fp, format='fastq', variant=v))
except Exception:
continue
else:
variant = v
break
if variant is None:
raise ValueError("Unknown variant, unable to interpret PHRED")
return variant
def trim_seqs(input_seqs, trim_len, left_trim_len):
"""Trim FASTA sequences to specified length.
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format
trim_len : int
Sequence trimming length. Specify a value of -1 to disable trimming.
left_trim_len : int
Sequence trimming from the 5' end. A value of 0 will disable this trim.
Returns
-------
Generator of (str, str)
The trimmed sequences in (label, sequence) format
"""
# counters for the number of trimmed and total sequences
logger = logging.getLogger(__name__)
okseqs = 0
totseqs = 0
if trim_len < -1:
raise ValueError("Invalid trim_len: %d" % trim_len)
for label, seq in input_seqs:
totseqs += 1
if trim_len == -1:
okseqs += 1
yield label, seq
elif len(seq) >= trim_len:
okseqs += 1
yield label, seq[left_trim_len:trim_len]
if okseqs < 0.01*totseqs:
logger = logging.getLogger(__name__)
errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
'than the trim length (%d). ' \
'Are you using the correct -t trim length?' \
% (totseqs-okseqs, totseqs, trim_len)
logger.warn(errmsg)
warnings.warn(errmsg, UserWarning)
else:
logger.debug('trimmed to length %d (%d / %d remaining)'
% (trim_len, okseqs, totseqs))
def dereplicate_seqs(seqs_fp,
output_fp,
min_size=2,
use_log=False,
threads=1):
"""Dereplicate FASTA sequences and remove singletons using VSEARCH.
Parameters
----------
seqs_fp : string
filepath to FASTA sequence file
output_fp : string
file path to dereplicated sequences (FASTA format)
min_size : integer, optional
discard sequences with an abundance value smaller
than integer
use_log: boolean, optional
save the vsearch logfile as well (to output_fp.log)
default=False
threads : int, optional
number of threads to use (0 for all available)
"""
logger = logging.getLogger(__name__)
logger.info('dereplicate seqs file %s' % seqs_fp)
log_name = "%s.log" % output_fp
params = ['vsearch', '--derep_fulllength', seqs_fp,
'--output', output_fp, '--sizeout',
'--fasta_width', '0', '--minuniquesize', str(min_size),
'--quiet', '--threads', str(threads)]
if use_log:
params.extend(['--log', log_name])
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('Problem running vsearch dereplication on file %s' %
seqs_fp)
logger.debug('parameters used:\n%s' % params)
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
return
def build_index_sortmerna(ref_fp, working_dir):
"""Build a SortMeRNA index for all reference databases.
Parameters
----------
ref_fp: tuple
filepaths to FASTA reference databases
working_dir: string
working directory path where to store the indexed database
Returns
-------
all_db: tuple
filepaths to SortMeRNA indexed reference databases
"""
logger = logging.getLogger(__name__)
logger.info('build_index_sortmerna files %s to'
' dir %s' % (ref_fp, working_dir))
all_db = []
for db in ref_fp:
fasta_dir, fasta_filename = split(db)
index_basename = splitext(fasta_filename)[0]
db_output = join(working_dir, index_basename)
logger.debug('processing file %s into location %s' % (db, db_output))
params = ['indexdb_rna', '--ref', '%s,%s' %
(db, db_output), '--tmpdir', working_dir]
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('Problem running indexdb_rna on file %s to dir %s. '
'database not indexed' % (db, db_output))
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
logger.critical('execution halted')
raise RuntimeError('Cannot index database file %s' % db)
logger.debug('file %s indexed' % db)
all_db.append(db_output)
return all_db
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
"""Filter samples from biom table that have less than
minreads reads total
Parameters
----------
table : biom.Table
the biom table to filter
minreads : int (optional)
the minimal number of reads in a sample in order to keep it
inplace : bool (optional)
if True, filter the biom table in place, if false create a new copy
Returns
-------
table : biom.Table
the filtered biom table
"""
logger = logging.getLogger(__name__)
logger.debug('filter_minreads_started. minreads=%d' % minreads)
samp_sum = table.sum(axis='sample')
samp_ids = table.ids(axis='sample')
bad_samples = samp_ids[samp_sum < minreads]
if len(bad_samples) > 0:
logger.warn('removed %d samples with reads per sample<%d'
% (len(bad_samples), minreads))
table = table.filter(bad_samples, axis='sample',
inplace=inplace, invert=True)
else:
logger.debug('all samples contain > %d reads' % minreads)
return table
def fasta_from_biom(table, fasta_file_name):
'''Save sequences from a biom table to a fasta file
Parameters
----------
table : biom.Table
The biom table containing the sequences
fasta_file_name : str
Name of the fasta output file
'''
logger = logging.getLogger(__name__)
logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
with open(fasta_file_name, 'w') as f:
for cseq in table.ids(axis='observation'):
f.write('>%s\n%s\n' % (cseq, cseq))
logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
fasta_filename,
ref_fp,
biom_table_dir,
ref_db_fp,
threads=1,
verbose=False,
sim_thresh=None,
coverage_thresh=None):
"""Remove artifacts from a biom table using SortMeRNA
Parameters
----------
table : str
name of the biom table file
fasta_filename : str
the fasta file containing all the sequences of the biom table
Returns
-------
tmp_files : list of str
The temp files created during the artifact removal step
"""
logger = logging.getLogger(__name__)
logger.info('getting 16s sequences from the biom table')
# remove artifacts from the fasta file. output is in clean_fp fasta file
clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
working_dir=biom_table_dir,
ref_db_fp=ref_db_fp,
negate=False, threads=threads,
verbose=verbose,
sim_thresh=sim_thresh,
coverage_thresh=coverage_thresh)
if clean_fp is None:
logger.warn("No clean sequences in %s" % fasta_filename)
return tmp_files
logger.debug('removed artifacts from sequences input %s'
' to output %s' % (fasta_filename, clean_fp))
# read the clean fasta file
good_seqs = {s for _, s in sequence_generator(clean_fp)}
logger.debug('loaded %d sequences from cleaned biom table'
' fasta file' % len(good_seqs))
logger.debug('loading biom table %s' % table_filename)
table = load_table(table_filename)
# filter and save the artifact biom table
artifact_table = table.filter(list(good_seqs),
axis='observation', inplace=False,
invert=True)
# remove the samples with 0 reads
filter_minreads_samples_from_table(artifact_table)
output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
write_biom_table(artifact_table, output_nomatch_fp)
logger.info('wrote artifact only filtered biom table to %s'
% output_nomatch_fp)
# and save the reference-non-hit fasta file
output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
# filter and save the only 16s biom table
table.filter(list(good_seqs), axis='observation')
# remove the samples with 0 reads
filter_minreads_samples_from_table(table)
output_fp = join(biom_table_dir, 'reference-hit.biom')
write_biom_table(table, output_fp)
logger.info('wrote 16s filtered biom table to %s' % output_fp)
# and save the reference-non-hit fasta file
output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
fasta_from_biom(table, output_match_fasta_fp)
# we also don't need the cleaned fasta file
tmp_files.append(clean_fp)
return tmp_files
def remove_artifacts_seqs(seqs_fp,
ref_fp,
working_dir,
ref_db_fp,
negate=False,
threads=1,
verbose=False,
sim_thresh=None,
coverage_thresh=None):
"""Remove artifacts from FASTA file using SortMeRNA.
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
ref_fp: tuple
file path(s) to FASTA database file
working_dir: string
working directory path
ref_db_fp: tuple
file path(s) to indexed FASTA database
negate: boolean, optional
if True, discard all input sequences aligning
to reference database
threads: integer, optional
number of threads to use for SortMeRNA
verbose: boolean, optional
If true, output SortMeRNA errors
sim_thresh: float, optional
The minimal similarity threshold (between 0 and 1)
for keeping the sequence
if None, the default values used are 0.65 for negate=False,
0.95 for negate=True
coverage_thresh: float, optional
The minimal coverage threshold (between 0 and 1)
for alignments for keeping the sequence
if None, the default values used are 0.5 for negate=False,
0.95 for negate=True
Returns
-------
output_fp : str
Name of the artifact removed fasta file
okseqs : int
The number of sequences left after artifact removal
tmp_files : list of str
Names of the tmp files created
"""
logger = logging.getLogger(__name__)
logger.info('remove_artifacts_seqs file %s' % seqs_fp)
if stat(seqs_fp).st_size == 0:
logger.warn('file %s has size 0, continuing' % seqs_fp)
return None, 0, []
if coverage_thresh is None:
if negate:
coverage_thresh = 0.95 * 100
else:
coverage_thresh = 0.5 * 100
if sim_thresh is None:
if negate:
sim_thresh = 0.95 * 100
else:
sim_thresh = 0.65 * 100
# the minimal average bitscore per nucleotide
bitscore_thresh = 0.65
output_fp = join(working_dir,
"%s.no_artifacts" % basename(seqs_fp))
blast_output = join(working_dir,
'%s.sortmerna' % basename(seqs_fp))
aligned_seq_ids = set()
for i, db in enumerate(ref_fp):
logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
% (db, working_dir, ref_db_fp[i], seqs_fp))
# run SortMeRNA
# we use -e 100 to remove E-value based filtering by sortmerna
# since we use bitscore/identity/coverage filtering instead
params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
(db, ref_db_fp[i]),
'--aligned', blast_output, '--blast', '3', '--best', '1',
'--print_all_reads', '-v', '-e', '100']
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('sortmerna error on file %s' % seqs_fp)
logger.error('stdout : %s' % sout)
logger.error('stderr : %s' % serr)
return output_fp, 0, []
blast_output_filename = '%s.blast' % blast_output
with open(blast_output_filename, 'r') as bfl:
for line in bfl:
line = line.strip().split('\t')
# if * means no match
if line[1] == '*':
continue
# check if % identity[2] and coverage[13] are large enough
if (float(line[2]) >= sim_thresh) and \
(float(line[13]) >= coverage_thresh) and \
(float(line[11]) >= bitscore_thresh * len(line[0])):
aligned_seq_ids.add(line[0])
if negate:
def op(x): return x not in aligned_seq_ids
else:
def op(x): return x in aligned_seq_ids
# if negate = False, only output sequences
# matching to at least one of the databases
totalseqs = 0
okseqs = 0
badseqs = 0
with open(output_fp, 'w') as out_f:
for label, seq in sequence_generator(seqs_fp):
totalseqs += 1
label = label.split()[0]
if op(label):
out_f.write(">%s\n%s\n" % (label, seq))
okseqs += 1
else:
badseqs += 1
logger.info('total sequences %d, passing sequences %d, '
'failing sequences %d' % (totalseqs, okseqs, badseqs))
return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
"""Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered
"""
logger = logging.getLogger(__name__)
logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
# for mafft we use -1 to denote all threads and not 0
if threads == 0:
threads = -1
if stat(seqs_fp).st_size == 0:
logger.warning('msa failed. file %s has no reads' % seqs_fp)
return None
msa_fp = seqs_fp + '.msa'
params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
'--thread', str(threads), seqs_fp]
sout, serr, res = _system_call(params, stdoutfilename=msa_fp)
if not res == 0:
logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
logger.debug('stderr : %s' % serr)
return None
return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
"""Remove chimeras de novo using UCHIME (VSEARCH implementation).
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
output_fp: string
file path to store chimera-free results
threads : int
number of threads (0 for all cores)
Returns
-------
output_fp
the chimera removed fasta file name
"""
logger = logging.getLogger(__name__)
logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
'to working dir %s' % (seqs_fp, working_dir))
output_fp = join(
working_dir, "%s.no_chimeras" % basename(seqs_fp))
# we use the parameters dn=0.000001, xn=1000, minh=10000000
# so 1 mismatch in the A/B region will cancel it being labeled as chimera
# and ~3 unique reads in each region will make it a chimera if
# no mismatches
params = ['vsearch', '--uchime_denovo', seqs_fp,
'--nonchimeras', output_fp,
'-dn', '0.000001', '-xn', '1000',
'-minh', '10000000', '--mindiffs', '5',
'--fasta_width', '0', '--threads', str(threads)]
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('problem with chimera removal for file %s' % seqs_fp)
logger.debug('stdout : %s' % sout)
logger.debug('stderr : %s' % serr)
return output_fp
def sample_id_from_read_id(readid):
"""Get SampleID from the split_libraries_fastq.py output
fasta file read header
Parameters
----------
readid : str
the fasta file read name
Returns
-------
sampleid : str
the sample id
"""
# get the sampleid_readid field
sampleread = readid.split(' ')[0]
# get the sampleid field
sampleid = sampleread.rsplit('_', 1)[0]
return sampleid
def split_sequence_file_on_sample_ids_to_files(seqs,
outdir):
"""Split FASTA file on sample IDs.
Parameters
----------
seqs: file handler
file handler to demultiplexed FASTA file
outdir: string
dirpath to output split FASTA files
"""
logger = logging.getLogger(__name__)
logger.info('split_sequence_file_on_sample_ids_to_files'
' for file %s into dir %s' % (seqs, outdir))
outputs = {}
for bits in sequence_generator(seqs):
sample = sample_id_from_read_id(bits[0])
if sample not in outputs:
outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')
outputs[sample].write(">%s\n%s\n" % (bits[0], bits[1]))
for sample in outputs:
outputs[sample].close()
logger.info('split to %d files' % len(outputs))
def write_biom_table(table, biom_fp):
"""Write BIOM table to file.
Parameters
----------
table: biom.table
an instance of a BIOM table
biom_fp: string
filepath to output BIOM table
"""
logger = logging.getLogger(__name__)
logger.debug('write_biom_table to file %s' % biom_fp)
with biom_open(biom_fp, 'w') as f:
table.to_hdf5(h5grp=f, generated_by="deblur")
logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
file_end='.trim.derep.no_artifacts'
'.msa.deblur.no_chimeras'):
"""Get a list of files to add to the output table
Parameters:
-----------
input_dir : string
name of the directory containing the deblurred fasta files
file_end : string
the ending of all the fasta files to be added to the table
(default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')
Returns
-------
names : list of tuples of (string,string)
list of tuples of:
name of fasta files to be added to the biom table
sampleid (file names without the file_end and path)
"""
logger = logging.getLogger(__name__)
logger.debug('get_files_for_table input dir %s, '
'file-ending %s' % (input_dir, file_end))
names = []
for cfile in glob(join(input_dir, "*%s" % file_end)):
if not isfile(cfile):
continue
sample_id = basename(cfile)[:-len(file_end)]
sample_id = os.path.splitext(sample_id)[0]
names.append((cfile, sample_id))
logger.debug('found %d files' % len(names))
return names
def create_otu_table(output_fp, deblurred_list,
outputfasta_fp=None, minreads=0):
"""Create a biom table out of all files in a directory
Parameters
----------
output_fp : string
filepath to output BIOM table
deblurred_list : list of (str, str)
list of file names (including path), sampleid of all deblurred
fasta files to add to the table
outputfasta_fp : str, optional
name of output fasta file (of all sequences in the table) or None
to not write
minreads : int, optional
minimal number of reads per bacterial sequence in order to write
it to the biom table and fasta file or 0 to write all
"""
logger = logging.getLogger(__name__)
logger.info('create_otu_table for %d samples, '
'into output table %s' % (len(deblurred_list), output_fp))
# the regexp for finding the number of reads of a sequence
sizeregexp = re.compile('(?<=size=)\w+')
seqdict = {}
seqlist = []
sampset = set()
samplist = []
# arbitrary size for the sparse results matrix so we won't run out of space
obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
# load the sequences from all samples into a sprase matrix
sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
for (cfilename, csampleid) in deblurred_list:
if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
csampleid = csampleid.rsplit('.', 1)[0]
# test if sample has already been processed
if csampleid in sampset:
warnings.warn('sample %s already in table!', UserWarning)
logger.error('sample %s already in table!' % csampleid)
continue
sampset.add(csampleid)
samplist.append(csampleid)
csampidx = len(sampset)-1
# read the fasta file and add to the matrix
for chead, cseq in sequence_generator(cfilename):
cseq = cseq.upper()
if cseq not in seqdict:
seqdict[cseq] = len(seqlist)
seqlist.append(cseq)
cseqidx = seqdict[cseq]
cfreq = float(sizeregexp.search(chead).group(0))
try:
obs[cseqidx, csampidx] += cfreq
except IndexError:
# exception means we ran out of space - add more OTUs
shape = obs.shape
obs.resize((shape[0]*2, shape[1]))
obs[cseqidx, csampidx] = cfreq
logger.info('for output biom table loaded %d samples, %d unique sequences'
% (len(samplist), len(seqlist)))
# and now make the sparse matrix the real size
obs.resize((len(seqlist), len(samplist)))
# do the minimal reads per otu filtering
if minreads > 0:
readsperotu = obs.sum(axis=1)
keep = np.where(readsperotu >= minreads)[0]
logger.info('keeping %d (out of %d sequences) with >=%d reads' %
(len(keep), len(seqlist), minreads))
obs = obs[keep, :]
seqlist = list(np.array(seqlist)[keep])
logger.debug('filtering completed')
# convert the matrix to a biom table
table = Table(obs.tocsr(), seqlist, samplist,
observation_metadata=None,
sample_metadata=None, table_id=None,
generated_by="deblur",
create_date=datetime.now().isoformat())
logger.debug('converted to biom table')
# remove samples with 0 reads
filter_minreads_samples_from_table(table)
# save the merged otu table
write_biom_table(table, output_fp)
logger.info('saved to biom file %s' % output_fp)
# and save the fasta file
if outputfasta_fp is not None:
logger.debug('saving fasta file')
with open(outputfasta_fp, 'w') as f:
for cseq in seqlist:
f.write('>%s\n%s\n' % (cseq, cseq))
logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
indel_prob, indel_max, trim_length, left_trim_length,
min_size, ref_fp, ref_db_fp, threads_per_sample=1,
sim_thresh=None, coverage_thresh=None):
"""Launch full deblur workflow for a single post split-libraries fasta file
Parameters
----------
seqs_fp: string
a post split library fasta file for debluring
working_dir: string
working directory path
mean_error: float
mean error for original sequence estimate
error_dist: list
list of error probabilities for each hamming distance
indel_prob: float
insertion/deletion (indel) probability
indel_max: integer
maximal indel number
trim_length: integer
sequence trim length
left_trim_length: integer
trim the first n reads
min_size: integer
upper limit on sequence abundance (discard sequences below limit)
ref_fp: tuple
filepath(s) to FASTA reference database for artifact removal
ref_db_fp: tuple
filepath(s) to SortMeRNA indexed database for artifact removal
threads_per_sample: integer, optional
number of threads to use for SortMeRNA/mafft/vsearch
(0 for max available)
sim_thresh: float, optional
the minimal similarity for a sequence to the database.
if None, take the defaults (0.65 for negate=False,
0.95 for negate=True)
coverage_thresh: float, optional
the minimal coverage for alignment of a sequence to the database.
if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)
Return
------
output_no_chimers_fp : string
filepath to fasta file with no chimeras of None if error encountered
"""
logger = logging.getLogger(__name__)
logger.info('--------------------------------------------------------')
logger.info('launch_workflow for file %s' % seqs_fp)
# Step 1: Trim sequences to specified length
output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
with open(output_trim_fp, 'w') as out_f:
for label, seq in trim_seqs(
input_seqs=sequence_generator(seqs_fp),
trim_len=trim_length,
left_trim_len=left_trim_length):
out_f.write(">%s\n%s\n" % (label, seq))
# Step 2: Dereplicate sequences
output_derep_fp = join(working_dir,
"%s.derep" % basename(output_trim_fp))
dereplicate_seqs(seqs_fp=output_trim_fp,
output_fp=output_derep_fp,
min_size=min_size, threads=threads_per_sample)
# Step 3: Remove artifacts
output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
ref_fp=ref_fp,
working_dir=working_dir,
ref_db_fp=ref_db_fp,
negate=True,
threads=threads_per_sample,
sim_thresh=sim_thresh)
if not output_artif_fp:
warnings.warn('Problem removing artifacts from file %s' %
seqs_fp, UserWarning)
logger.warning('remove artifacts failed, aborting')
return None
# Step 4: Multiple sequence alignment
if num_seqs_left > 1:
output_msa_fp = join(working_dir,
"%s.msa" % basename(output_artif_fp))
alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
threads=threads_per_sample)
if not alignment:
warnings.warn('Problem performing multiple sequence alignment '
'on file %s' % seqs_fp, UserWarning)
logger.warning('msa failed. aborting')
return None
elif num_seqs_left == 1:
# only one sequence after remove artifacts (but could be many reads)
# no need to run MSA - just use the pre-msa file as input for next step
output_msa_fp = output_artif_fp
else:
err_msg = ('No sequences left after artifact removal in '
'file %s' % seqs_fp)
warnings.warn(err_msg, UserWarning)
logger.warning(err_msg)
return None
# Step 5: Launch deblur
output_deblur_fp = join(working_dir,
"%s.deblur" % basename(output_msa_fp))
with open(output_deblur_fp, 'w') as f:
seqs = deblur(sequence_generator(output_msa_fp), mean_error,
error_dist, indel_prob, indel_max)
if seqs is None:
warnings.warn('multiple sequence alignment file %s contains '
'no sequences' % output_msa_fp, UserWarning)
logger.warn('no sequences returned from deblur for file %s' %
output_msa_fp)
return None
for s in seqs:
# remove '-' from aligned sequences
s.sequence = s.sequence.replace('-', '')
f.write(s.to_fasta())
# Step 6: Chimera removal
output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
output_deblur_fp, working_dir, threads=threads_per_sample)
logger.info('finished processing file')
return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """Initialize the run logger.

    Parameters
    ----------
    level : int, optional
        logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
    filename : str, optional
        name of the filename to save the log to or
        None (default) to use deblur.log.TIMESTAMP
    """
    if filename is None:
        # build a filesystem-friendly timestamp (spaces/colons -> dots)
        stamp = time.ctime().replace(' ', '.').replace(':', '.')
        filename = 'deblur.log.%s' % stamp
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exist status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
|
biocore/deblur | deblur/workflow.py | trim_seqs | python | def trim_seqs(input_seqs, trim_len, left_trim_len):
# counters for the number of trimmed and total sequences
logger = logging.getLogger(__name__)
okseqs = 0
totseqs = 0
if trim_len < -1:
raise ValueError("Invalid trim_len: %d" % trim_len)
for label, seq in input_seqs:
totseqs += 1
if trim_len == -1:
okseqs += 1
yield label, seq
elif len(seq) >= trim_len:
okseqs += 1
yield label, seq[left_trim_len:trim_len]
if okseqs < 0.01*totseqs:
logger = logging.getLogger(__name__)
errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
'than the trim length (%d). ' \
'Are you using the correct -t trim length?' \
% (totseqs-okseqs, totseqs, trim_len)
logger.warn(errmsg)
warnings.warn(errmsg, UserWarning)
else:
logger.debug('trimmed to length %d (%d / %d remaining)'
% (trim_len, okseqs, totseqs)) | Trim FASTA sequences to specified length.
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format
trim_len : int
Sequence trimming length. Specify a value of -1 to disable trimming.
left_trim_len : int
Sequence trimming from the 5' end. A value of 0 will disable this trim.
Returns
-------
Generator of (str, str)
The trimmed sequences in (label, sequence) format | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L103-L150 | null | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Detect which FASTQ quality-encoding variant `input_fp` uses.

    Tries each known variant in turn and returns the first one under
    which scikit-bio can parse a record.
    See http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters

    Raises
    ------
    ValueError
        If no known variant can interpret the PHRED scores.
    """
    for candidate in ('illumina1.8', 'illumina1.3', 'solexa', 'sanger'):
        try:
            # parsing a single record is enough to validate the encoding
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) tuples from an input file.

    Parameters
    ----------
    input_fp : filepath
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.

    Notes
    -----
    The use of this method is a stopgap to replicate the existing
    `parse_fasta` functionality while at the same time allowing for
    fastq support.

    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.

    Returns
    -------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    read_kwargs = {}
    if sniff_fasta(input_fp)[0]:
        fmt = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        fmt = 'fastq'
        read_kwargs['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        logger.warn(msg)
        warnings.warn(msg, UserWarning)
        return
    # some of the test code is using file paths, some is using StringIO;
    # sniffing consumed an in-memory stream, so rewind before re-reading
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=fmt, **read_kwargs):
        yield (record.metadata['id'], str(record))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Dereplicate FASTA sequences and remove singletons using VSEARCH.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller
        than integer
    use_log: boolean, optional
        save the vsearch logfile as well (to output_fp.log)
        default=False
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)
    cmd = ['vsearch', '--derep_fulllength', seqs_fp,
           '--output', output_fp, '--sizeout',
           '--fasta_width', '0', '--minuniquesize', str(min_size),
           '--quiet', '--threads', str(threads)]
    if use_log:
        cmd.extend(['--log', '%s.log' % output_fp])
    sout, serr, res = _system_call(cmd)
    if res != 0:
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % cmd)
        logger.debug('stdout: %s' % sout)
        logger.debug('stderr: %s' % serr)
        return
def build_index_sortmerna(ref_fp, working_dir):
    """Build a SortMeRNA index for all reference databases.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database

    Returns
    -------
    all_db: list
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        If indexdb_rna fails for any of the databases.
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    indexed_paths = []
    for fasta_fp in ref_fp:
        _, fasta_name = split(fasta_fp)
        # the index lives in working_dir, named after the fasta basename
        db_output = join(working_dir, splitext(fasta_name)[0])
        logger.debug('processing file %s into location %s' % (fasta_fp,
                                                              db_output))
        cmd = ['indexdb_rna', '--ref', '%s,%s' %
               (fasta_fp, db_output), '--tmpdir', working_dir]
        sout, serr, res = _system_call(cmd)
        if res != 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (fasta_fp, db_output))
            logger.debug('stdout: %s' % sout)
            logger.debug('stderr: %s' % serr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % fasta_fp)
        logger.debug('file %s indexed' % fasta_fp)
        indexed_paths.append(db_output)
    return indexed_paths
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Filter out samples from a biom table that have fewer than
    `minreads` reads in total.

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    # boolean mask selects the samples below the read threshold
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        # logger.warn is a deprecated alias of logger.warning
        logger.warning('removed %d samples with reads per sample<%d'
                       % (len(bad_samples), minreads))
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def fasta_from_biom(table, fasta_file_name):
    '''Write the sequences of a biom table to a fasta file.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    # in deblur tables the observation id *is* the sequence itself
    with open(fasta_file_name, 'w') as out:
        out.writelines('>%s\n%s\n' % (seq, seq)
                       for seq in table.ids(axis='observation'))
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA.

    Splits the input table into a 'reference-hit' table (observations whose
    sequence matched at least one reference database) and a
    'reference-non-hit' table, writing both tables (.biom) and their
    sequences (.seqs.fa) into `biom_table_dir`.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        filepath(s) to the FASTA reference database(s)
    biom_table_dir : str
        directory where the output tables and fasta files are written
    ref_db_fp : tuple
        filepath(s) to the SortMeRNA-indexed reference database(s)
    threads : int, optional
        number of threads forwarded to remove_artifacts_seqs
    verbose : bool, optional
        forwarded to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold (see remove_artifacts_seqs)
    coverage_thresh : float, optional
        minimal coverage threshold (see remove_artifacts_seqs)

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file; observation ids are the sequences themselves
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact biom table
    # (invert=True keeps only the observations NOT in good_seqs)
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s biom table (this time in place)
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True

    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    # NOTE(review): `verbose` is accepted but never referenced in this body
    # - confirm whether SortMeRNA stderr should be surfaced when it is True.
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    if stat(seqs_fp).st_size == 0:
        logger.warn('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # thresholds are scaled to percent (0-100) to match the units of the
    # sortmerna blast-output columns they are compared against below
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100
    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    # ids that hit any of the databases, accumulated over all ref_fp entries
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []
        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                # and the bitscore[11] averages >= bitscore_thresh per char
                # of the query id.
                # NOTE(review): len(line[0]) is the length of the query *id*
                # string - presumably the id here is the sequence itself
                # (deblur uses sequences as ids); verify against callers.
                if (float(line[2]) >= sim_thresh) and \
                        (float(line[13]) >= coverage_thresh) and \
                        (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            # keep only the first whitespace-delimited token of the header
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Perform multiple sequence alignment on FASTA file using MAFFT.

    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads

    Returns
    -------
    msa_fp : str
        name of output alignment file or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # mafft denotes "all threads" with -1 rather than 0
    if threads == 0:
        threads = -1
    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    msa_fp = seqs_fp + '.msa'
    cmd = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
           '--thread', str(threads), seqs_fp]
    sout, serr, res = _system_call(cmd, stdoutfilename=msa_fp)
    if res != 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % serr)
        return None
    return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory where the chimera-free output file is written
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    output_fp
        the chimera removed fasta file name
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))
    output_fp = join(
        working_dir, "%s.no_chimeras" % basename(seqs_fp))
    # we use the parameters dn=0.000001, xn=1000, minh=10000000
    # so 1 mismatch in the A/B region will cancel it being labeled as chimera
    # and ~3 unique reads in each region will make it a chimera if
    # no mismatches
    cmd = ['vsearch', '--uchime_denovo', seqs_fp,
           '--nonchimeras', output_fp,
           '-dn', '0.000001', '-xn', '1000',
           '-minh', '10000000', '--mindiffs', '5',
           '--fasta_width', '0', '--threads', str(threads)]
    sout, serr, res = _system_call(cmd)
    if res != 0:
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % sout)
        logger.debug('stderr : %s' % serr)
    return output_fp
def sample_id_from_read_id(readid):
    """Extract the sample id from a split_libraries_fastq.py read header.

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # header layout is '<sampleid>_<readnumber> <description...>':
    # drop the description, then strip the trailing read number
    first_field = readid.partition(' ')[0]
    return first_field.rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Split a demultiplexed FASTA file into one FASTA file per sample.

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))
    # one open handle per sample, created lazily on first read of that sample
    handles = {}
    for header, sequence in sequence_generator(seqs):
        sample = sample_id_from_read_id(header)
        if sample not in handles:
            handles[sample] = open(join(outdir, sample + '.fasta'), 'w')
        handles[sample].write(">%s\n%s\n" % (header, sequence))
    for handle in handles.values():
        handle.close()
    logger.info('split to %d files' % len(handles))
def write_biom_table(table, biom_fp):
    """Write BIOM table to file.

    Parameters
    ----------
    table: biom.table
        an instance of a BIOM table
    biom_fp: string
        filepath to output BIOM table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    # serialize as HDF5, tagging the file as deblur-generated
    with biom_open(biom_fp, 'w') as out:
        table.to_hdf5(h5grp=out, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """Get a list of files to add to the output table.

    Parameters:
    -----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the ending of all the fasta files to be added to the table
        (default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')

    Returns
    -------
    names : list of tuples of (string,string)
        list of tuples of:
            name of fasta files to be added to the biom table
            sampleid (file names without the file_end and path)
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))

    def _sample_id(path):
        # strip the deblur suffix, then the fasta/fastq extension
        stem = basename(path)[:-len(file_end)]
        return os.path.splitext(stem)[0]

    names = [(cfile, _sample_id(cfile))
             for cfile in glob(join(input_dir, "*%s" % file_end))
             if isfile(cfile)]
    logger.debug('found %d files' % len(names))
    return names
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory.

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string so \w reaches the regex engine unmangled)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}
    seqlist = []
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # interpolate the id into the message (previously the literal
            # '%s' was shown to the user)
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch the full deblur workflow for a single post split-libraries
    fasta file.

    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for debluring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length
    left_trim_length: integer
        trim the first n bases of each read
    min_size: integer
        minimal number of reads a dereplicated sequence needs in order to
        be kept (passed to vsearch --minuniquesize; sequences with fewer
        reads are discarded)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.5 for negate=False, 0.95 for negate=True)

    Return
    ------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras, or None if an error was
        encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            # logger.warn is a deprecated alias of warning
            logger.warning('no sequences returned from deblur for file %s' %
                           output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
"""start the logger for the run
Parameters
----------
level : int, optional
logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
filename : str, optional
name of the filename to save the log to or
None (default) to use deblur.log.TIMESTAMP
"""
if filename is None:
tstr = time.ctime()
tstr = tstr.replace(' ', '.')
tstr = tstr.replace(':', '.')
filename = 'deblur.log.%s' % tstr
logging.basicConfig(filename=filename, level=level,
format='%(levelname)s(%(thread)d)'
'%(asctime)s:%(message)s')
logger = logging.getLogger(__name__)
logger.info('*************************')
logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
    """Execute the command `cmd`
    Parameters
    ----------
    cmd : list of str
        The command to run as an argument list (executed with
        shell=False; callers in this module pass lists such as
        ['vsearch', '--derep_fulllength', ...]).
    stdoutfilename : str
        Name of the file to save stdout to or None
        (default) to not save to file. stderr is always captured
        through a pipe and returned.
    Returns
    -------
    tuple of (str, str, int)
        The standard output, standard error and exit status of the
        executed command
    Notes
    -----
    This function is ported and modified from QIIME
    (http://www.qiime.org), previously named
    qiime_system_call. QIIME is a GPL project, but we obtained permission from
    the authors of this function to port it to Qiita and keep it under BSD
    license.
    """
    logger = logging.getLogger(__name__)
    logger.debug('system call: %s' % cmd)
    if stdoutfilename:
        # stdout is streamed directly into the file; only stderr is piped back
        with open(stdoutfilename, 'w') as f:
            proc = subprocess.Popen(cmd, universal_newlines=True,
                                    shell=False, stdout=f,
                                    stderr=subprocess.PIPE)
    else:
        proc = subprocess.Popen(cmd, universal_newlines=True,
                                shell=False, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    # Communicate pulls all stdout/stderr from the PIPEs
    # This call blocks until the command is done
    stdout, stderr = proc.communicate()
    return_value = proc.returncode
    return stdout, stderr, return_value
|
biocore/deblur | deblur/workflow.py | dereplicate_seqs | python | def dereplicate_seqs(seqs_fp,
output_fp,
min_size=2,
use_log=False,
threads=1):
logger = logging.getLogger(__name__)
logger.info('dereplicate seqs file %s' % seqs_fp)
log_name = "%s.log" % output_fp
params = ['vsearch', '--derep_fulllength', seqs_fp,
'--output', output_fp, '--sizeout',
'--fasta_width', '0', '--minuniquesize', str(min_size),
'--quiet', '--threads', str(threads)]
if use_log:
params.extend(['--log', log_name])
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('Problem running vsearch dereplication on file %s' %
seqs_fp)
logger.debug('parameters used:\n%s' % params)
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
return | Dereplicate FASTA sequences and remove singletons using VSEARCH.
Parameters
----------
seqs_fp : string
filepath to FASTA sequence file
output_fp : string
file path to dereplicated sequences (FASTA format)
min_size : integer, optional
discard sequences with an abundance value smaller
than integer
use_log: boolean, optional
save the vsearch logfile as well (to output_fp.log)
default=False
threads : int, optional
number of threads to use (0 for all available) | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L153-L193 | [
"def _system_call(cmd, stdoutfilename=None):\n \"\"\"Execute the command `cmd`\n Parameters\n ----------\n cmd : str\n The string containing the command to be run.\n stdoutfilename : str\n Name of the file to save stdout to or None\n (default) to not save to file\n stderrfilen... | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Return the PHRED quality-encoding variant of a FASTQ file.
    Tries to parse the first record with each known variant and
    returns the first one that succeeds.
    Parameters
    ----------
    input_fp : filepath
        path to (or handle of) the FASTQ file to inspect
    Returns
    -------
    str
        one of 'illumina1.8', 'illumina1.3', 'solexa', 'sanger'
    Raises
    ------
    ValueError
        if no known variant can parse the file
    """
    # http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
    variant = None
    variants = ['illumina1.8', 'illumina1.3', 'solexa', 'sanger']
    for v in variants:
        try:
            next(skbio.read(input_fp, format='fastq', variant=v))
        except Exception:
            # this variant cannot decode the file -- try the next one
            continue
        else:
            variant = v
            break
    if variant is None:
        raise ValueError("Unknown variant, unable to interpret PHRED")
    return variant
def sequence_generator(input_fp):
    """Yield (id, sequence) from an input file
    Parameters
    ----------
    input_fp : filepath
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.
    Notes
    -----
    The use of this method is a stopgap to replicate the existing `parse_fasta`
    functionality while at the same time allowing for fastq support.
    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.
    Returns
    -------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    kw = {}
    # use `fmt` rather than `format` to avoid shadowing the builtin
    if sniff_fasta(input_fp)[0]:
        fmt = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        fmt = 'fastq'
        # FASTQ additionally needs the PHRED variant to decode qualities
        kw['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        # Logger.warn is deprecated; use warning
        logger.warning(msg)
        warnings.warn(msg, UserWarning)
        return
    # some of the test code is using file paths, some is using StringIO.
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=fmt, **kw):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.
    Sequences shorter than ``trim_len`` are dropped, unless trimming is
    disabled with ``trim_len == -1`` (then every sequence is yielded
    untouched).
    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.
    Returns
    -------
    Generator of (str, str)
        The trimmed sequences in (label, sequence) format
    Raises
    ------
    ValueError
        If trim_len is smaller than -1.
    """
    logger = logging.getLogger(__name__)
    # counters for the number of trimmed and total sequences
    okseqs = 0
    totseqs = 0
    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)
    for label, seq in input_seqs:
        totseqs += 1
        if trim_len == -1:
            okseqs += 1
            yield label, seq
        elif len(seq) >= trim_len:
            okseqs += 1
            yield label, seq[left_trim_len:trim_len]
    if okseqs < 0.01*totseqs:
        # nearly everything was dropped -- most likely a wrong trim length
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (totseqs-okseqs, totseqs, trim_len)
        # Logger.warn is deprecated; use warning
        logger.warning(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, okseqs, totseqs))
def build_index_sortmerna(ref_fp, working_dir):
    """Build a SortMeRNA index for all reference databases.
    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database
    Returns
    -------
    all_db: list
        filepaths to SortMeRNA indexed reference databases
    Raises
    ------
    RuntimeError
        if the external indexdb_rna tool fails for any reference file
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    all_db = []
    for db in ref_fp:
        fasta_dir, fasta_filename = split(db)
        index_basename = splitext(fasta_filename)[0]
        db_output = join(working_dir, index_basename)
        logger.debug('processing file %s into location %s' % (db, db_output))
        # index each database with the external indexdb_rna tool
        params = ['indexdb_rna', '--ref', '%s,%s' %
                  (db, db_output), '--tmpdir', working_dir]
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (db, db_output))
            logger.debug('stdout: %s' % sout)
            logger.debug('stderr: %s' % serr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % db)
        logger.debug('file %s indexed' % db)
        all_db.append(db_output)
    return all_db
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Filter samples from biom table that have less than
    minreads reads total
    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy
    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    # samples whose total read count falls below the threshold
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        # Logger.warn is deprecated; use warning
        logger.warning('removed %d samples with reads per sample<%d'
                       % (len(bad_samples), minreads))
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def fasta_from_biom(table, fasta_file_name):
    '''Write the observation sequences of a biom table to a fasta file.
    Observation ids in deblur tables are the sequences themselves, so
    each record uses the id both as header and as sequence body.
    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    records = ('>%s\n%s\n' % (cseq, cseq)
               for cseq in table.ids(axis='observation'))
    with open(fasta_file_name, 'w') as out:
        out.writelines(records)
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA
    Splits the table into a 'reference-hit' table/fasta (sequences that
    align to at least one reference database) and a 'reference-non-hit'
    table/fasta (sequences that do not), both written to biom_table_dir.
    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        file path(s) to the FASTA reference database(s)
    biom_table_dir : str
        directory where the output tables and fasta files are written
    ref_db_fp : tuple
        file path(s) to the SortMeRNA-indexed reference database(s)
    threads : int, optional
        number of threads passed through to SortMeRNA
    verbose : bool, optional
        passed through to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold (see remove_artifacts_seqs)
    coverage_thresh : float, optional
        minimal coverage threshold (see remove_artifacts_seqs)
    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact biom table
    # (invert=True keeps observations NOT in good_seqs, i.e. non-hits)
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s biom table
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.
    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
        (NOTE(review): currently unused in the body)
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True
    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    if stat(seqs_fp).st_size == 0:
        logger.warn('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # defaults are expressed as percentages (0-100), matching the
    # sortmerna blast output columns compared against below
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100
    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []
        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                # NOTE(review): len(line[0]) is the length of the query
                # *label*; when the fasta comes from fasta_from_biom the
                # label is the sequence itself, so this normalizes the
                # bitscore per nucleotide -- confirm for other inputs
                if (float(line[2]) >= sim_thresh) and \
                   (float(line[13]) >= coverage_thresh) and \
                   (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    # note: only the blast file of the last database iteration is
    # reported as a tmp file (blast_output_filename is rebound per db)
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Perform multiple sequence alignment on FASTA file using MAFFT.
    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads
    Returns
    -------
    msa_fp : str
        name of output alignment file or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # for mafft we use -1 to denote all threads and not 0
    if threads == 0:
        threads = -1
    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    msa_fp = seqs_fp + '.msa'
    # mafft writes the alignment to stdout; capture it into msa_fp
    params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
              '--thread', str(threads), seqs_fp]
    sout, serr, res = _system_call(params, stdoutfilename=msa_fp)
    if not res == 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % serr)
        return None
    return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).
    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory in which the chimera-free output file is created
    threads : int
        number of threads (0 for all cores)
    Returns
    -------
    output_fp
        the chimera removed fasta file name
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))
    output_fp = join(
        working_dir, "%s.no_chimeras" % basename(seqs_fp))
    # we use the parameters dn=0.000001, xn=1000, minh=10000000
    # so 1 mismatch in the A/B region will cancel it being labeled as chimera
    # and ~3 unique reads in each region will make it a chimera if
    # no mismatches
    params = ['vsearch', '--uchime_denovo', seqs_fp,
              '--nonchimeras', output_fp,
              '-dn', '0.000001', '-xn', '1000',
              '-minh', '10000000', '--mindiffs', '5',
              '--fasta_width', '0', '--threads', str(threads)]
    sout, serr, res = _system_call(params)
    if not res == 0:
        # on failure the (possibly missing) output path is still returned;
        # details go to the log only
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % sout)
        logger.debug('stderr : %s' % serr)
    return output_fp
def sample_id_from_read_id(readid):
    """Extract the SampleID from a split_libraries_fastq.py read header.
    Headers look like '<sampleid>_<readnum> <description...>'; the sample
    id is everything before the last underscore of the first
    whitespace-separated token.
    Parameters
    ----------
    readid : str
        the fasta file read name
    Returns
    -------
    sampleid : str
        the sample id
    """
    # drop the description after the first space, if any
    token = readid.partition(' ')[0]
    # strip the trailing read number
    return token.rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Split FASTA file on sample IDs.
    Writes one '<sampleid>.fasta' file per sample into outdir, where the
    sample id is derived from each read header via sample_id_from_read_id.
    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))
    outputs = {}
    try:
        for bits in sequence_generator(seqs):
            sample = sample_id_from_read_id(bits[0])
            if sample not in outputs:
                outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')
            outputs[sample].write(">%s\n%s\n" % (bits[0], bits[1]))
    finally:
        # close every per-sample handle even if parsing fails midway,
        # so no file descriptors leak on error
        for out_f in outputs.values():
            out_f.close()
    logger.info('split to %d files' % len(outputs))
def write_biom_table(table, biom_fp):
    """Write BIOM table to file.
    Parameters
    ----------
    table: biom.table
        an instance of a BIOM table
    biom_fp: string
        filepath to output BIOM table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    # serialize as HDF5 (the biom 2.x on-disk format)
    with biom_open(biom_fp, 'w') as f:
        table.to_hdf5(h5grp=f, generated_by="deblur")
        logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """Collect the deblurred per-sample fasta files in a directory.
    Parameters:
    -----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the ending of all the fasta files to be added to the table
        (default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')
    Returns
    -------
    names : list of tuples of (string,string)
        list of tuples of:
            name of fasta files to be added to the biom table
            sampleid (file names without the file_end and path)
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))
    def _sample_id(path):
        # file name minus the pipeline suffix and the fasta extension
        stem = basename(path)[:-len(file_end)]
        return os.path.splitext(stem)[0]
    candidates = glob(join(input_dir, "*%s" % file_end))
    names = [(cfile, _sample_id(cfile))
             for cfile in candidates if isfile(cfile)]
    logger.debug('found %d files' % len(names))
    return names
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory
    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string so '\w' is not treated as an invalid escape)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}
    seqlist = []
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # interpolate the sample id into the message (previously the
            # literal '%s' was shown because warn() got it as an argument)
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file
    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for debluring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length
    left_trim_length: integer
        trim the first n reads
    min_size: integer
        upper limit on sequence abundance (discard sequences below limit)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.95, since artifact removal
        here runs with negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.95, since artifact removal
        here runs with negate=True)
    Return
    ------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts
    # forward coverage_thresh as well (it was previously accepted by this
    # function but silently ignored)
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh,
                                                              coverage_thresh=coverage_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            # Logger.warn is deprecated; use warning
            logger.warning('no sequences returned from deblur for file %s' %
                           output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """start the logger for the run
    Parameters
    ----------
    level : int, optional
        logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
    filename : str, optional
        name of the filename to save the log to or
        None (default) to use deblur.log.TIMESTAMP
    """
    if filename is None:
        # make the timestamp filesystem friendly (no spaces or colons)
        tstr = time.ctime()
        tstr = tstr.replace(' ', '.')
        tstr = tstr.replace(':', '.')
        filename = 'deblur.log.%s' % tstr
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
    """Execute the command `cmd`
    Parameters
    ----------
    cmd : list of str
        The command to run as an argument list (executed with
        shell=False; callers in this module pass lists such as
        ['vsearch', '--derep_fulllength', ...]).
    stdoutfilename : str
        Name of the file to save stdout to or None
        (default) to not save to file. stderr is always captured
        through a pipe and returned.
    Returns
    -------
    tuple of (str, str, int)
        The standard output, standard error and exit status of the
        executed command
    Notes
    -----
    This function is ported and modified from QIIME
    (http://www.qiime.org), previously named
    qiime_system_call. QIIME is a GPL project, but we obtained permission from
    the authors of this function to port it to Qiita and keep it under BSD
    license.
    """
    logger = logging.getLogger(__name__)
    logger.debug('system call: %s' % cmd)
    if stdoutfilename:
        # stdout is streamed directly into the file; only stderr is piped back
        with open(stdoutfilename, 'w') as f:
            proc = subprocess.Popen(cmd, universal_newlines=True,
                                    shell=False, stdout=f,
                                    stderr=subprocess.PIPE)
    else:
        proc = subprocess.Popen(cmd, universal_newlines=True,
                                shell=False, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    # Communicate pulls all stdout/stderr from the PIPEs
    # This call blocks until the command is done
    stdout, stderr = proc.communicate()
    return_value = proc.returncode
    return stdout, stderr, return_value
|
biocore/deblur | deblur/workflow.py | build_index_sortmerna | python | def build_index_sortmerna(ref_fp, working_dir):
logger = logging.getLogger(__name__)
logger.info('build_index_sortmerna files %s to'
' dir %s' % (ref_fp, working_dir))
all_db = []
for db in ref_fp:
fasta_dir, fasta_filename = split(db)
index_basename = splitext(fasta_filename)[0]
db_output = join(working_dir, index_basename)
logger.debug('processing file %s into location %s' % (db, db_output))
params = ['indexdb_rna', '--ref', '%s,%s' %
(db, db_output), '--tmpdir', working_dir]
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('Problem running indexdb_rna on file %s to dir %s. '
'database not indexed' % (db, db_output))
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
logger.critical('execution halted')
raise RuntimeError('Cannot index database file %s' % db)
logger.debug('file %s indexed' % db)
all_db.append(db_output)
return all_db | Build a SortMeRNA index for all reference databases.
Parameters
----------
ref_fp: tuple
filepaths to FASTA reference databases
working_dir: string
working directory path where to store the indexed database
Returns
-------
all_db: tuple
filepaths to SortMeRNA indexed reference databases | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L196-L232 | [
"def _system_call(cmd, stdoutfilename=None):\n \"\"\"Execute the command `cmd`\n Parameters\n ----------\n cmd : str\n The string containing the command to be run.\n stdoutfilename : str\n Name of the file to save stdout to or None\n (default) to not save to file\n stderrfilen... | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
# Format sniffers resolved once from scikit-bio's IO registry at import time.
# Calling a sniffer on an input returns a tuple whose first element is truthy
# when the input matches that format (see sequence_generator below).
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Detect the PHRED encoding variant of a FASTQ file.

    Parameters
    ----------
    input_fp : filepath
        FASTQ file whose quality encoding should be determined.

    Returns
    -------
    str
        The first variant under which the file parses successfully.

    Raises
    ------
    ValueError
        If none of the known variants can interpret the PHRED scores.
    """
    # Candidate encodings, tried in order; see
    # http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
    for candidate in ('illumina1.8', 'illumina1.3', 'solexa', 'sanger'):
        try:
            # reading a single record is enough to validate the encoding
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) from an input file

    Parameters
    ----------
    input_fp : filepath or file-like object
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.

    Notes
    -----
    The use of this method is a stopgap to replicate the existing `parse_fasta`
    functionality while at the same time allowing for fastq support.

    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.

    Returns
    -------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    kw = {}
    # the first element of a sniffer result is truthy when the input
    # matches that format
    if sniff_fasta(input_fp)[0]:
        format = 'fasta'  # NOTE(review): shadows the builtin 'format'; kept as-is
    elif sniff_fastq(input_fp)[0]:
        format = 'fastq'
        # fastq additionally needs the PHRED offset variant to decode scores
        kw['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        logger.warn(msg)
        warnings.warn(msg, UserWarning)
        return
    # some of the test code is using file paths, some is using StringIO.
    # rewind file-like inputs since the sniffers above have already read them
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=format, **kw):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.

    Returns
    -------
    Generator of (str, str)
        The trimmed sequences in (label, sequence) format
    """
    logger = logging.getLogger(__name__)
    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)
    # track how many sequences survive the trim so we can warn on a
    # suspiciously high drop rate once the iterator is exhausted
    kept = 0
    total = 0
    for label, seq in input_seqs:
        total += 1
        if trim_len == -1:
            # trimming disabled: pass everything through untouched
            kept += 1
            yield label, seq
        elif len(seq) >= trim_len:
            kept += 1
            yield label, seq[left_trim_len:trim_len]
        # sequences shorter than trim_len are silently dropped
    if kept < 0.01 * total:
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (total - kept, total, trim_len)
        logger.warn(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, kept, total))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Dereplicate FASTA sequences and remove singletons using VSEARCH.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller
        than integer
    use_log: boolean, optional
        save the vsearch logfile as well (to output_fp.log)
        default=False
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)
    log_name = "%s.log" % output_fp
    # --sizeout writes per-sequence abundance into the header (the 'size='
    # annotation parsed later when the OTU table is built);
    # --minuniquesize drops sequences rarer than min_size (singletons by default)
    params = ['vsearch', '--derep_fulllength', seqs_fp,
              '--output', output_fp, '--sizeout',
              '--fasta_width', '0', '--minuniquesize', str(min_size),
              '--quiet', '--threads', str(threads)]
    if use_log:
        params.extend(['--log', log_name])
    sout, serr, res = _system_call(params)
    if not res == 0:
        # best-effort: on vsearch failure log details and return without raising
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % params)
        logger.debug('stdout: %s' % sout)
        logger.debug('stderr: %s' % serr)
    return
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Filter samples from biom table that have less than
    minreads reads total

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    # samples whose total read count falls below the threshold
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        # logger.warning: .warn is a deprecated alias of .warning
        logger.warning('removed %d samples with reads per sample<%d'
                       % (len(bad_samples), minreads))
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def fasta_from_biom(table, fasta_file_name):
    '''Save sequences from a biom table to a fasta file

    The observation ids of the table are the sequences themselves, so each
    one is written as both the FASTA header and the sequence line.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    with open(fasta_file_name, 'w') as out:
        out.writelines('>%s\n%s\n' % (seq, seq)
                       for seq in table.ids(axis='observation'))
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA

    The table is split into a reference-hit part and a reference-non-hit
    part; each is written into biom_table_dir as a biom table plus a
    matching fasta file.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        file path(s) to FASTA reference database(s)
    biom_table_dir : str
        directory into which the output tables/fasta files are written
    ref_db_fp : tuple
        file path(s) to the indexed reference database(s)
    threads : int, optional
        number of threads to use for SortMeRNA
    verbose : bool, optional
        if True, output SortMeRNA errors
    sim_thresh : float, optional
        minimal similarity threshold, forwarded to remove_artifacts_seqs
    coverage_thresh : float, optional
        minimal coverage threshold, forwarded to remove_artifacts_seqs

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file; sequences double as observation ids
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact biom table (invert=True keeps non-hits)
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s biom table (in-place this time)
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # the cleaned fasta file is intermediate output; mark it for removal too
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True

    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    if stat(seqs_fp).st_size == 0:
        # empty input: nothing to align, bail out early
        logger.warn('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # thresholds are given as fractions but compared against SortMeRNA's
    # percent-valued blast columns, hence the * 100 scaling
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100
    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    # ids of reads that hit at least one database; accumulated across dbs
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []
        # parse this run's blast output before the next db overwrites it
        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                # (bitscore[11] is compared per-nucleotide of the read id;
                # NOTE(review): len(line[0]) is the length of the read *id*,
                # not the read itself — confirm intended)
                if (float(line[2]) >= sim_thresh) and \
                        (float(line[13]) >= coverage_thresh) and \
                        (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            # drop everything after the first whitespace to match blast ids
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Perform multiple sequence alignment on FASTA file using MAFFT.

    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads

    Returns
    -------
    msa_fp : str
        name of output alignment file or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # mafft denotes "use all available threads" with -1 rather than 0
    if threads == 0:
        threads = -1
    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    msa_fp = seqs_fp + '.msa'
    cmd = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
           '--thread', str(threads), seqs_fp]
    # mafft writes the alignment to stdout, so capture it into msa_fp
    stdout, stderr, status = _system_call(cmd, stdoutfilename=msa_fp)
    if status != 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % stderr)
        return None
    return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory in which the chimera-free output file is created
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    output_fp
        the chimera removed fasta file name
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))
    out_fp = join(working_dir, "%s.no_chimeras" % basename(seqs_fp))
    # dn=0.000001, xn=1000, minh=10000000 make a single mismatch in the
    # A/B region veto the chimera call, while ~3 unique reads per region
    # with no mismatches are enough to flag one
    cmd = ['vsearch', '--uchime_denovo', seqs_fp,
           '--nonchimeras', out_fp,
           '-dn', '0.000001', '-xn', '1000',
           '-minh', '10000000', '--mindiffs', '5',
           '--fasta_width', '0', '--threads', str(threads)]
    stdout, stderr, status = _system_call(cmd)
    if status != 0:
        # best-effort: log the failure but still return the (possibly
        # missing) output path, matching the original contract
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % stdout)
        logger.debug('stderr : %s' % stderr)
    return out_fp
def sample_id_from_read_id(readid):
    """Get SampleID from the split_libraries_fastq.py output
    fasta file read header

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # header format is "<sampleid>_<readnum> <rest...>": take the token
    # before the first space, then drop the trailing _<readnum>
    sampleread, _, _ = readid.partition(' ')
    return sampleread.rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs, outdir):
    """Split FASTA file on sample IDs.

    Each read is appended to "<outdir>/<sampleid>.fasta", where the sample
    id is derived from the read header.

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))
    # one output handle per sample, opened lazily on first read
    handles = {}
    for header, sequence in sequence_generator(seqs):
        sample = sample_id_from_read_id(header)
        out = handles.get(sample)
        if out is None:
            out = handles[sample] = open(join(outdir, sample + '.fasta'), 'w')
        out.write(">%s\n%s\n" % (header, sequence))
    for out in handles.values():
        out.close()
    logger.info('split to %d files' % len(handles))
def write_biom_table(table, biom_fp):
    """Write BIOM table to file.

    Parameters
    ----------
    table: biom.table
        an instance of a BIOM table
    biom_fp: string
        filepath to output BIOM table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    # serialize in HDF5 format, tagging deblur as the generator
    with biom_open(biom_fp, 'w') as handle:
        table.to_hdf5(h5grp=handle, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """Get a list of files to add to the output table

    Parameters:
    -----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the ending of all the fasta files to be added to the table
        (default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')

    Returns
    -------
    names : list of tuples of (string,string)
        list of tuples of:
            name of fasta files to be added to the biom table
            sampleid (file names without the file_end and path)
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))
    names = []
    for path in glob(join(input_dir, "*%s" % file_end)):
        if not isfile(path):
            continue
        # strip the pipeline suffix, then any remaining extension (.fasta)
        stem = basename(path)[:-len(file_end)]
        names.append((path, os.path.splitext(stem)[0]))
    logger.debug('found %d files' % len(names))
    return names
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # NOTE(review): '\w' inside a non-raw string relies on Python passing
    # unknown escapes through; a raw string r'(?<=size=)\w+' would be safer
    sizeregexp = re.compile('(?<=size=)\w+')
    seqdict = {}
    seqlist = []
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        # strip a stray sequence-file extension from the sample id, if any
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # NOTE(review): the '%s' here is never interpolated — the warning
            # text is emitted literally
            warnings.warn('sample %s already in table!', UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            # abundance comes from the 'size=N' annotation in the header
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file (sequences double as their own ids)
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file

    Runs trim -> dereplicate -> artifact removal -> MSA -> deblur ->
    chimera removal, writing each intermediate file into working_dir.

    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for debluring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length
    left_trim_length: integer
        trim the first n reads
    min_size: integer
        upper limit on sequence abundance (discard sequences below limit)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)

    Return
    ------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras, or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts (negate=True keeps only database hits)
    # NOTE(review): coverage_thresh is accepted by this function but not
    # forwarded to remove_artifacts_seqs here — confirm intended
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        # output_msa_fp matches the '.msa' path multiple_sequence_alignment
        # writes; the returned `alignment` is only used as a success flag
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            logger.warn('no sequences returned from deblur for file %s' %
                        output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """start the logger for the run

    Parameters
    ----------
    level : int, optional
        logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
    filename : str, optional
        name of the filename to save the log to or
        None (default) to use deblur.log.TIMESTAMP
    """
    if filename is None:
        # default name embeds a timestamp; spaces/colons are made path-safe
        stamp = time.ctime().replace(' ', '.').replace(':', '.')
        filename = 'deblur.log.%s' % stamp
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
    """Execute the command `cmd`

    Parameters
    ----------
    cmd : list of str
        The command to be run, as an argument list (shell=False).
    stdoutfilename : str
        Name of the file to save stdout to or None
        (default) to not save to file

    Returns
    -------
    tuple of (str, str, int)
        The standard output, standard error and exit status of the
        executed command

    Notes
    -----
    This function is ported and modified from QIIME
    (http://www.qiime.org), previously named
    qiime_system_call. QIIME is a GPL project, but we obtained permission from
    the authors of this function to port it to Qiita and keep it under BSD
    license.
    """
    logger = logging.getLogger(__name__)
    logger.debug('system call: %s' % cmd)
    # shared Popen options; stdout goes either to a file or to a pipe
    popen_kw = dict(universal_newlines=True, shell=False,
                    stderr=subprocess.PIPE)
    if stdoutfilename:
        with open(stdoutfilename, 'w') as out_f:
            proc = subprocess.Popen(cmd, stdout=out_f, **popen_kw)
    else:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, **popen_kw)
    # communicate() drains the pipes and blocks until the child exits
    stdout, stderr = proc.communicate()
    return stdout, stderr, proc.returncode
|
biocore/deblur | deblur/workflow.py | filter_minreads_samples_from_table | python | def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
logger = logging.getLogger(__name__)
logger.debug('filter_minreads_started. minreads=%d' % minreads)
samp_sum = table.sum(axis='sample')
samp_ids = table.ids(axis='sample')
bad_samples = samp_ids[samp_sum < minreads]
if len(bad_samples) > 0:
logger.warn('removed %d samples with reads per sample<%d'
% (len(bad_samples), minreads))
table = table.filter(bad_samples, axis='sample',
inplace=inplace, invert=True)
else:
logger.debug('all samples contain > %d reads' % minreads)
return table | Filter samples from biom table that have less than
minreads reads total
Paraneters
----------
table : biom.Table
the biom table to filter
minreads : int (optional)
the minimal number of reads in a sample in order to keep it
inplace : bool (optional)
if True, filter the biom table in place, if false create a new copy
Returns
-------
table : biom.Table
the filtered biom table | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L235-L265 | null | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
# http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
variant = None
variants = ['illumina1.8', 'illumina1.3', 'solexa', 'sanger']
for v in variants:
try:
next(skbio.read(input_fp, format='fastq', variant=v))
except Exception:
continue
else:
variant = v
break
if variant is None:
raise ValueError("Unknown variant, unable to interpret PHRED")
return variant
def sequence_generator(input_fp):
"""Yield (id, sequence) from an input file
Parameters
----------
input_fp : filepath
A filepath, which can be any valid fasta or fastq file within the
limitations of scikit-bio's IO registry.
Notes
-----
The use of this method is a stopgap to replicate the existing `parse_fasta`
functionality while at the same time allowing for fastq support.
Raises
------
skbio.io.FormatIdentificationWarning
If the format of the input file cannot be determined.
Returns
-------
(str, str)
The ID and sequence.
"""
logger = logging.getLogger(__name__)
kw = {}
if sniff_fasta(input_fp)[0]:
format = 'fasta'
elif sniff_fastq(input_fp)[0]:
format = 'fastq'
kw['variant'] = _get_fastq_variant(input_fp)
else:
# usually happens when the fasta file is empty
# so need to return no sequences (and warn)
msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
logger.warn(msg)
warnings.warn(msg, UserWarning)
return
# some of the test code is using file paths, some is using StringIO.
if isinstance(input_fp, io.TextIOBase):
input_fp.seek(0)
for record in skbio.read(input_fp, format=format, **kw):
yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
"""Trim FASTA sequences to specified length.
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format
trim_len : int
Sequence trimming length. Specify a value of -1 to disable trimming.
left_trim_len : int
Sequence trimming from the 5' end. A value of 0 will disable this trim.
Returns
-------
Generator of (str, str)
The trimmed sequences in (label, sequence) format
"""
# counters for the number of trimmed and total sequences
logger = logging.getLogger(__name__)
okseqs = 0
totseqs = 0
if trim_len < -1:
raise ValueError("Invalid trim_len: %d" % trim_len)
for label, seq in input_seqs:
totseqs += 1
if trim_len == -1:
okseqs += 1
yield label, seq
elif len(seq) >= trim_len:
okseqs += 1
yield label, seq[left_trim_len:trim_len]
if okseqs < 0.01*totseqs:
logger = logging.getLogger(__name__)
errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
'than the trim length (%d). ' \
'Are you using the correct -t trim length?' \
% (totseqs-okseqs, totseqs, trim_len)
logger.warn(errmsg)
warnings.warn(errmsg, UserWarning)
else:
logger.debug('trimmed to length %d (%d / %d remaining)'
% (trim_len, okseqs, totseqs))
def dereplicate_seqs(seqs_fp,
output_fp,
min_size=2,
use_log=False,
threads=1):
"""Dereplicate FASTA sequences and remove singletons using VSEARCH.
Parameters
----------
seqs_fp : string
filepath to FASTA sequence file
output_fp : string
file path to dereplicated sequences (FASTA format)
min_size : integer, optional
discard sequences with an abundance value smaller
than integer
use_log: boolean, optional
save the vsearch logfile as well (to output_fp.log)
default=False
threads : int, optional
number of threads to use (0 for all available)
"""
logger = logging.getLogger(__name__)
logger.info('dereplicate seqs file %s' % seqs_fp)
log_name = "%s.log" % output_fp
params = ['vsearch', '--derep_fulllength', seqs_fp,
'--output', output_fp, '--sizeout',
'--fasta_width', '0', '--minuniquesize', str(min_size),
'--quiet', '--threads', str(threads)]
if use_log:
params.extend(['--log', log_name])
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('Problem running vsearch dereplication on file %s' %
seqs_fp)
logger.debug('parameters used:\n%s' % params)
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
return
def build_index_sortmerna(ref_fp, working_dir):
    """Build SortMeRNA indexes for all reference databases.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database

    Returns
    -------
    all_db: tuple
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        if indexdb_rna fails for any of the databases
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    all_db = []
    for fasta_fp in ref_fp:
        _, fasta_name = split(fasta_fp)
        # index files share the basename of the fasta, inside working_dir
        out_base = join(working_dir, splitext(fasta_name)[0])
        logger.debug('processing file %s into location %s' % (fasta_fp, out_base))
        cmd = ['indexdb_rna', '--ref', '%s,%s' %
               (fasta_fp, out_base), '--tmpdir', working_dir]
        stdout, stderr, exit_code = _system_call(cmd)
        if exit_code != 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (fasta_fp, out_base))
            logger.debug('stdout: %s' % stdout)
            logger.debug('stderr: %s' % stderr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % fasta_fp)
        logger.debug('file %s indexed' % fasta_fp)
        all_db.append(out_base)
    return all_db
def fasta_from_biom(table, fasta_file_name):
    '''Write the observation ids of a biom table to a fasta file.

    In deblur tables the observation id IS the sequence, so each record
    uses the sequence both as header and as body.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    with open(fasta_file_name, 'w') as out:
        out.writelines('>%s\n%s\n' % (seq, seq)
                       for seq in table.ids(axis='observation'))
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA.
    Splits the input table into a 'reference-hit' table/fasta pair
    (sequences matching at least one reference database) and a
    'reference-non-hit' pair, both written to biom_table_dir.
    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple of str
        filepath(s) to FASTA reference database(s)
    biom_table_dir : str
        directory into which the filtered tables/fasta files are written
    ref_db_fp : tuple of str
        filepath(s) to the SortMeRNA-indexed reference database(s)
    threads : int, optional
        number of threads to use for SortMeRNA
    verbose : bool, optional
        passed through to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold (see remove_artifacts_seqs)
    coverage_thresh : float, optional
        minimal coverage threshold (see remove_artifacts_seqs)
    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        # empty input file - nothing to split, return whatever temp files exist
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file; observation ids in deblur tables are the
    # sequences themselves, so a set of sequences is enough to filter by
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact biom table
    # (invert=True keeps only observations NOT in good_seqs)
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s biom table (in-place this time)
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.
    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True
    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    # NOTE(review): `verbose` is currently unused in this body - TODO confirm
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    if stat(seqs_fp).st_size == 0:
        # empty input - nothing to filter
        logger.warn('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # thresholds are converted from fractions to percentages because the
    # sortmerna blast output reports identity/coverage as percentages
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100
    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []
        blast_output_filename = '%s.blast' % blast_output
        # parse the tab-separated blast-format output:
        # col 0 = query id, col 1 = subject id, col 2 = % identity,
        # col 11 = bitscore, col 13 = % coverage
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                if (float(line[2]) >= sim_thresh) and \
                   (float(line[13]) >= coverage_thresh) and \
                   (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            # keep only the id portion of the header (drop description)
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Align the sequences in a FASTA file with MAFFT.

    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads

    Returns
    -------
    msa_fp : str
        name of output alignment file or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # mafft expects -1 (not 0) to mean "use all available threads"
    if threads == 0:
        threads = -1
    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    msa_fp = seqs_fp + '.msa'
    cmd = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
           '--thread', str(threads), seqs_fp]
    # mafft writes the alignment to stdout; capture it into msa_fp
    _, stderr, exit_code = _system_call(cmd, stdoutfilename=msa_fp)
    if exit_code != 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % stderr)
        return None
    return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).

    Parameters
    ----------
    seqs_fp : string
        file path to FASTA input sequence file
    working_dir : string
        directory in which to store the chimera-free results
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    output_fp : str
        the chimera removed fasta file name
    """
    logger = logging.getLogger(__name__)
    # NOTE: a separating space was missing between the two message parts
    # ("file %sto working dir") - fixed here
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s '
                'to working dir %s' % (seqs_fp, working_dir))
    output_fp = join(
        working_dir, "%s.no_chimeras" % basename(seqs_fp))
    # we use the parameters dn=0.000001, xn=1000, minh=10000000
    # so 1 mismatch in the A/B region will cancel it being labeled as chimera
    # and ~3 unique reads in each region will make it a chimera if
    # no mismatches
    params = ['vsearch', '--uchime_denovo', seqs_fp,
              '--nonchimeras', output_fp,
              '-dn', '0.000001', '-xn', '1000',
              '-minh', '10000000', '--mindiffs', '5',
              '--fasta_width', '0', '--threads', str(threads)]
    sout, serr, res = _system_call(params)
    if not res == 0:
        # best-effort: log the failure but still return the (possibly
        # missing) output path, matching the original behavior
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % sout)
        logger.debug('stderr : %s' % serr)
    return output_fp
def sample_id_from_read_id(readid):
    """Extract the sample id from a split_libraries_fastq.py read header.

    Headers look like "<sampleid>_<readnum> <description...>"; the sample
    id is everything before the last '_' of the first whitespace token.

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    first_token = readid.split(' ')[0]
    return first_token.rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Demultiplex a FASTA file into one file per sample id.

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))
    # one open handle per sample, kept for the whole pass over the input
    handles = {}
    for header, sequence in sequence_generator(seqs):
        sample = sample_id_from_read_id(header)
        handle = handles.get(sample)
        if handle is None:
            handle = open(join(outdir, sample + '.fasta'), 'w')
            handles[sample] = handle
        handle.write(">%s\n%s\n" % (header, sequence))
    for handle in handles.values():
        handle.close()
    logger.info('split to %d files' % len(handles))
def write_biom_table(table, biom_fp):
    """Serialize a BIOM table to an HDF5 file.

    Parameters
    ----------
    table: biom.table
        an instance of a BIOM table
    biom_fp: string
        filepath to output BIOM table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    with biom_open(biom_fp, 'w') as out_handle:
        table.to_hdf5(h5grp=out_handle, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """List deblurred fasta files to add to the output table.

    Parameters
    ----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the ending shared by all fasta files to be added to the table

    Returns
    -------
    names : list of tuples of (string, string)
        (filepath, sampleid) pairs, where sampleid is the file name
        without the path, the file_end suffix, or its extension
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))
    names = []
    for candidate in glob(join(input_dir, "*%s" % file_end)):
        if not isfile(candidate):
            continue
        # strip the suffix, then the remaining file extension (.fasta etc.)
        stem = basename(candidate)[:-len(file_end)]
        stem = os.path.splitext(stem)[0]
        names.append((candidate, stem))
    logger.debug('found %d files' % len(names))
    return names
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string so '\w' is not an invalid escape sequence)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}   # sequence -> row index in the sparse matrix
    seqlist = []   # row index -> sequence
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # fix: interpolate the sample id into the warning message
            # (it was previously passed as a literal '%s')
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file
    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for debluring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length
    left_trim_length: integer
        trim the first n reads
    min_size: integer
        upper limit on sequence abundance (discard sequences below limit)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)
    Return
    ------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras, or None if an error
        was encountered at any step
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences (also drops sequences rarer than min_size)
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts (negate=True keeps sequences that DO match
    # the positive reference database(s))
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            logger.warn('no sequences returned from deblur for file %s' %
                        output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """Configure and start the logger for the run.

    Parameters
    ----------
    level : int, optional
        logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
    filename : str, optional
        name of the filename to save the log to or
        None (default) to use deblur.log.TIMESTAMP
    """
    if filename is None:
        # embed the current timestamp in the default file name, with
        # spaces and colons replaced to stay filesystem friendly
        stamp = time.ctime().replace(' ', '.').replace(':', '.')
        filename = 'deblur.log.%s' % stamp
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exist status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
|
biocore/deblur | deblur/workflow.py | fasta_from_biom | python | def fasta_from_biom(table, fasta_file_name):
'''Save sequences from a biom table to a fasta file
Parameters
----------
table : biom.Table
The biom table containing the sequences
fasta_file_name : str
Name of the fasta output file
'''
logger = logging.getLogger(__name__)
logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
with open(fasta_file_name, 'w') as f:
for cseq in table.ids(axis='observation'):
f.write('>%s\n%s\n' % (cseq, cseq))
logger.info('saved biom table sequences to fasta file %s' % fasta_file_name) | Save sequences from a biom table to a fasta file
Parameters
----------
table : biom.Table
The biom table containing the sequences
fasta_file_name : str
Name of the fasta output file | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L268-L284 | null | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
# cached scikit-bio sniffers used by sequence_generator to detect whether
# an input file is FASTA or FASTQ
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Detect the PHRED encoding variant of a FASTQ file.

    Tries to parse the first record with each known variant and returns
    the first one that succeeds.

    Raises
    ------
    ValueError
        if no known variant can parse the file
    """
    # http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
    for candidate in ['illumina1.8', 'illumina1.3', 'solexa', 'sanger']:
        try:
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) from an input file
    Parameters
    ----------
    input_fp : filepath
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.
    Notes
    -----
    The use of this method is a stopgap to replicate the existing `parse_fasta`
    functionality while at the same time allowing for fastq support.
    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.
    Returns
    -------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    kw = {}
    if sniff_fasta(input_fp)[0]:
        format = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        format = 'fastq'
        # fastq additionally needs the PHRED offset variant
        kw['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        # logger.warn is deprecated; use logger.warning
        logger.warning(msg)
        warnings.warn(msg, UserWarning)
        return
    # some of the test code is using file paths, some is using StringIO.
    # sniffing consumed the stream, so rewind before the real parse
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=format, **kw):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.
    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.
    Returns
    -------
    Generator of (str, str)
        The trimmed sequences in (label, sequence) format
    Raises
    ------
    ValueError
        If trim_len is smaller than -1.
    """
    logger = logging.getLogger(__name__)
    # counters for the number of kept and total sequences
    okseqs = 0
    totseqs = 0
    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)
    for label, seq in input_seqs:
        totseqs += 1
        if trim_len == -1:
            # trimming disabled - pass the sequence through unchanged
            okseqs += 1
            yield label, seq
        elif len(seq) >= trim_len:
            okseqs += 1
            yield label, seq[left_trim_len:trim_len]
        # sequences shorter than trim_len are silently dropped
    if okseqs < 0.01*totseqs:
        # <1% of the reads survived - most likely a wrong trim length
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (totseqs-okseqs, totseqs, trim_len)
        # logger.warn is deprecated; use logger.warning
        logger.warning(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, okseqs, totseqs))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Dereplicate FASTA sequences with VSEARCH, dropping rare sequences.

    Runs ``vsearch --derep_fulllength`` on the input file and keeps only
    sequences whose abundance is at least ``min_size``.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller than this
    use_log : boolean, optional
        if True, also save the vsearch logfile (to output_fp.log)
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)
    cmd = ['vsearch', '--derep_fulllength', seqs_fp,
           '--output', output_fp, '--sizeout',
           '--fasta_width', '0', '--minuniquesize', str(min_size),
           '--quiet', '--threads', str(threads)]
    if use_log:
        cmd.extend(['--log', '%s.log' % output_fp])
    stdout, stderr, exit_code = _system_call(cmd)
    if exit_code != 0:
        # vsearch failed - record everything we know for debugging
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % cmd)
        logger.debug('stdout: %s' % stdout)
        logger.debug('stderr: %s' % stderr)
        return
def build_index_sortmerna(ref_fp, working_dir):
    """Build SortMeRNA indexes for all reference databases.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database

    Returns
    -------
    all_db: tuple
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        if indexdb_rna fails for any of the databases
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    all_db = []
    for fasta_fp in ref_fp:
        _, fasta_name = split(fasta_fp)
        # index files share the basename of the fasta, inside working_dir
        out_base = join(working_dir, splitext(fasta_name)[0])
        logger.debug('processing file %s into location %s' % (fasta_fp, out_base))
        cmd = ['indexdb_rna', '--ref', '%s,%s' %
               (fasta_fp, out_base), '--tmpdir', working_dir]
        stdout, stderr, exit_code = _system_call(cmd)
        if exit_code != 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (fasta_fp, out_base))
            logger.debug('stdout: %s' % stdout)
            logger.debug('stderr: %s' % stderr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % fasta_fp)
        logger.debug('file %s indexed' % fasta_fp)
        all_db.append(out_base)
    return all_db
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Filter samples from biom table that have less than
    minreads reads total

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    # samples whose total read count falls below the threshold
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        # logger.warn is deprecated; use logger.warning
        logger.warning('removed %d samples with reads per sample<%d'
                       % (len(bad_samples), minreads))
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA.
    Splits the input table into a 'reference-hit' table/fasta pair
    (sequences matching at least one reference database) and a
    'reference-non-hit' pair, both written to biom_table_dir.
    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple of str
        filepath(s) to FASTA reference database(s)
    biom_table_dir : str
        directory into which the filtered tables/fasta files are written
    ref_db_fp : tuple of str
        filepath(s) to the SortMeRNA-indexed reference database(s)
    threads : int, optional
        number of threads to use for SortMeRNA
    verbose : bool, optional
        passed through to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold (see remove_artifacts_seqs)
    coverage_thresh : float, optional
        minimal coverage threshold (see remove_artifacts_seqs)
    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        # empty input file - nothing to split, return whatever temp files exist
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file; observation ids in deblur tables are the
    # sequences themselves, so a set of sequences is enough to filter by
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact biom table
    # (invert=True keeps only observations NOT in good_seqs)
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s biom table (in-place this time)
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True

    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    # empty input: nothing to filter, return the "no output" triple
    if stat(seqs_fp).st_size == 0:
        logger.warn('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # thresholds are scaled by 100 because the sortmerna blast columns
    # report identity/coverage as percentages
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100
    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    # ids of reads that hit at least one reference database
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []
        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                # NOTE(review): the bitscore test scales by len(line[0]),
                # i.e. the length of the query *id* string, not the read
                # sequence length — confirm this is intended
                if (float(line[2]) >= sim_thresh) and \
                   (float(line[13]) >= coverage_thresh) and \
                   (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            # keep only the first whitespace token so the label matches
            # the query id reported in the blast output
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Align the sequences of a FASTA file with MAFFT.

    Parameters
    ----------
    seqs_fp: string
        filepath to the FASTA file to align
    threads: integer, optional
        number of threads to use; 0 means all available threads

    Returns
    -------
    msa_fp : str
        name of the output alignment file, or None on empty input/failure
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # mafft denotes "all threads" by -1, not 0
    if threads == 0:
        threads = -1
    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    msa_fp = seqs_fp + '.msa'
    cmd = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
           '--thread', str(threads), seqs_fp]
    # mafft writes the alignment to stdout; redirect it into msa_fp
    _, serr, ret = _system_call(cmd, stdoutfilename=msa_fp)
    if ret != 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % serr)
        return None
    return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory in which the chimera-free output file is created
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    output_fp : str
        the chimera removed fasta file name

    Notes
    -----
    On vsearch failure the error is logged and the output file name is
    still returned (the file may be missing or incomplete).
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))
    output_fp = join(
        working_dir, "%s.no_chimeras" % basename(seqs_fp))
    # we use the parameters dn=0.000001, xn=1000, minh=10000000
    # so 1 mismatch in the A/B region will cancel it being labeled as chimera
    # and ~3 unique reads in each region will make it a chimera if
    # no mismatches
    params = ['vsearch', '--uchime_denovo', seqs_fp,
              '--nonchimeras', output_fp,
              '-dn', '0.000001', '-xn', '1000',
              '-minh', '10000000', '--mindiffs', '5',
              '--fasta_width', '0', '--threads', str(threads)]
    sout, serr, res = _system_call(params)
    if not res == 0:
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % sout)
        logger.debug('stderr : %s' % serr)
    return output_fp
def sample_id_from_read_id(readid):
    """Extract the sample id from a demultiplexed read header.

    split_libraries_fastq.py headers look like "<sampleid>_<readnum> <rest>":
    the sample id is everything before the last underscore of the first
    whitespace-separated token.

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # first token is "<sampleid>_<readnum>"; strip the trailing read number
    first_token = readid.split(' ')[0]
    return first_token.rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Split FASTA file on sample IDs.

    One output file per sample id is created in `outdir`, named
    "<sampleid>.fasta". Output handles are always closed, even if an
    exception is raised while reading the input (the original code left
    them open on error).

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))
    outputs = {}
    try:
        for label, seq in sequence_generator(seqs):
            sample = sample_id_from_read_id(label)
            out_fh = outputs.get(sample)
            if out_fh is None:
                # first read of this sample: open its output file
                out_fh = outputs[sample] = open(
                    join(outdir, sample + '.fasta'), 'w')
            out_fh.write(">%s\n%s\n" % (label, seq))
    finally:
        # make sure no handle leaks, even on a mid-loop exception
        for out_fh in outputs.values():
            out_fh.close()
    logger.info('split to %d files' % len(outputs))
def write_biom_table(table, biom_fp):
    """Serialize a BIOM table to an HDF5 file.

    Parameters
    ----------
    table: biom.table
        the BIOM table to write
    biom_fp: string
        destination filepath for the table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    with biom_open(biom_fp, 'w') as out_f:
        # tag the file so downstream tools know it came from deblur
        table.to_hdf5(h5grp=out_f, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """List the deblurred fasta files to merge into the output table.

    Parameters:
    -----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the ending of all the fasta files to be added to the table
        (default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')

    Returns
    -------
    names : list of tuples of (string,string)
        (filepath, sampleid) per matching file; the sample id is the file
        name with path, suffix and extension stripped
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))
    names = []
    for candidate in glob(join(input_dir, "*%s" % file_end)):
        if not isfile(candidate):
            continue
        # strip the shared suffix, then the remaining extension
        stem = basename(candidate)[:-len(file_end)]
        names.append((candidate, os.path.splitext(stem)[0]))
    logger.debug('found %d files' % len(names))
    return names
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string so \w is a regex word class, not a python escape)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}   # sequence -> row index in the sparse matrix
    seqlist = []   # row index -> sequence
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)),
                                  dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        # drop a fasta/fastq extension that leaked into the sample id
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # interpolate the sample id explicitly: warnings.warn does not
            # apply %-formatting (its second argument is the category)
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset) - 1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            # read count is encoded in the header as "size=<n>"
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file (the sequence is its own id)
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file

    Pipeline: trim -> dereplicate -> artifact removal -> MSA -> deblur ->
    chimera removal. Each step writes its output next to the input in
    `working_dir`.

    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for debluring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length
    left_trim_length: integer
        trim the first n reads
    min_size: integer
        upper limit on sequence abundance (discard sequences below limit)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.5 for negate=False, 0.95 for negate=True)

    Return
    ------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts
    # negate=True: discard reads that align to the reference database
    # (presumably an artifact database here — confirm against callers)
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            logger.warn('no sequences returned from deblur for file %s' %
                        output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """Initialize logging for a deblur run.

    Parameters
    ----------
    level : int, optional
        logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
    filename : str, optional
        name of the filename to save the log to or
        None (default) to use deblur.log.TIMESTAMP
    """
    if filename is None:
        # derive a filesystem-safe timestamp (no spaces or colons)
        stamp = time.ctime().replace(' ', '.').replace(':', '.')
        filename = 'deblur.log.%s' % stamp
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                               '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exist status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
|
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA.

    Splits the input table into a reference-hit table/fasta pair and a
    reference-non-hit table/fasta pair, both written to `biom_table_dir`.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        file path(s) to the FASTA reference database(s)
    biom_table_dir : str
        directory into which the filtered tables and fasta files are written
    ref_db_fp : tuple
        file path(s) to the SortMeRNA-indexed reference database(s)
    threads : int, optional
        number of threads to use for SortMeRNA
    verbose : bool, optional
        if True, output SortMeRNA errors
    sim_thresh : float, optional
        minimal similarity threshold; None uses remove_artifacts_seqs defaults
    coverage_thresh : float, optional
        minimal coverage threshold; None uses remove_artifacts_seqs defaults

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, _, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                   working_dir=biom_table_dir,
                                                   ref_db_fp=ref_db_fp,
                                                   negate=False,
                                                   threads=threads,
                                                   verbose=verbose,
                                                   sim_thresh=sim_thresh,
                                                   coverage_thresh=coverage_thresh)
    if clean_fp is None:
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact (reference-non-hit) biom table
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the reference-hit (16s) biom table
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # the cleaned fasta file is also temporary - schedule it for deletion
    tmp_files.append(clean_fp)
    return tmp_files
"def remove_artifacts_seqs(seqs_fp,\n ref_fp,\n working_dir,\n ref_db_fp,\n negate=False,\n threads=1,\n verbose=False,\n sim_thresh=None,\n ... | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
# cached scikit-bio sniffers used to detect whether an input file is
# FASTA or FASTQ before parsing it
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Detect the PHRED encoding variant of a fastq file.

    Tries each scikit-bio fastq variant in turn and returns the first one
    that parses; raises ValueError if none do.
    See http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
    """
    for candidate in ('illumina1.8', 'illumina1.3', 'solexa', 'sanger'):
        try:
            # reading one record is enough to validate the variant
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) from an input file

    Parameters
    ----------
    input_fp : filepath
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.

    Notes
    -----
    The use of this method is a stopgap to replicate the existing `parse_fasta`
    functionality while at the same time allowing for fastq support.

    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.

    Returns
    -------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    kw = {}
    if sniff_fasta(input_fp)[0]:
        format = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        format = 'fastq'
        # fastq additionally needs the PHRED variant to decode scores
        kw['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        logger.warn(msg)
        warnings.warn(msg, UserWarning)
        return
    # some of the test code is using file paths, some is using StringIO.
    # rewind handles that the sniffers above already consumed the stream
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=format, **kw):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.

    Returns
    -------
    Generator of (str, str)
        The trimmed sequences in (label, sequence) format
    """
    log = logging.getLogger(__name__)
    # track how many records survive the length filter
    kept = 0
    total = 0
    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)
    for label, seq in input_seqs:
        total += 1
        if trim_len == -1:
            # trimming disabled: pass the record through untouched
            kept += 1
            yield label, seq
        elif len(seq) >= trim_len:
            kept += 1
            yield label, seq[left_trim_len:trim_len]
    if kept < 0.01 * total:
        errmsg = ('Vast majority of sequences (%d / %d) are shorter '
                  'than the trim length (%d). '
                  'Are you using the correct -t trim length?'
                  % (total - kept, total, trim_len))
        log.warn(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        log.debug('trimmed to length %d (%d / %d remaining)'
                  % (trim_len, kept, total))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Dereplicate FASTA sequences and remove singletons using VSEARCH.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller
        than integer
    use_log: boolean, optional
        save the vsearch logfile as well (to output_fp.log)
        default=False
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)
    # --sizeout annotates each unique sequence with its abundance
    cmd = ['vsearch', '--derep_fulllength', seqs_fp,
           '--output', output_fp, '--sizeout',
           '--fasta_width', '0', '--minuniquesize', str(min_size),
           '--quiet', '--threads', str(threads)]
    if use_log:
        cmd += ['--log', '%s.log' % output_fp]
    sout, serr, ret = _system_call(cmd)
    if ret != 0:
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % cmd)
        logger.debug('stdout: %s' % sout)
        logger.debug('stderr: %s' % serr)
def build_index_sortmerna(ref_fp, working_dir):
    """Build a SortMeRNA index for all reference databases.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database

    Returns
    -------
    all_db: list
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        if indexdb_rna fails for any of the databases
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    indexed = []
    for db_fp in ref_fp:
        # index file basename mirrors the fasta file name (sans extension)
        _, db_filename = split(db_fp)
        out_base = join(working_dir, splitext(db_filename)[0])
        logger.debug('processing file %s into location %s' % (db_fp, out_base))
        sout, serr, ret = _system_call(['indexdb_rna', '--ref',
                                        '%s,%s' % (db_fp, out_base),
                                        '--tmpdir', working_dir])
        if ret != 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (db_fp, out_base))
            logger.debug('stdout: %s' % sout)
            logger.debug('stderr: %s' % serr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % db_fp)
        logger.debug('file %s indexed' % db_fp)
        indexed.append(out_base)
    return indexed
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Filter samples from biom table that have less than
    minreads reads total

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    # per-sample totals and ids line up positionally
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        logger.warn('removed %d samples with reads per sample<%d'
                    % (len(bad_samples), minreads))
        # invert=True keeps everything except the low-read samples
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def fasta_from_biom(table, fasta_file_name):
    '''Write the sequences of a biom table to a fasta file.

    Each observation id is itself a sequence, so it serves both as the
    fasta header and the sequence line.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    records = ('>%s\n%s\n' % (seq, seq)
               for seq in table.ids(axis='observation'))
    with open(fasta_file_name, 'w') as out_f:
        out_f.writelines(records)
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True

    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    # empty input: nothing to filter, return the "no output" triple
    if stat(seqs_fp).st_size == 0:
        logger.warn('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # thresholds are scaled by 100 because the sortmerna blast columns
    # report identity/coverage as percentages
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100
    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    # ids of reads that hit at least one reference database
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []
        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                # NOTE(review): the bitscore test scales by len(line[0]),
                # i.e. the length of the query *id* string, not the read
                # sequence length — confirm this is intended
                if (float(line[2]) >= sim_thresh) and \
                   (float(line[13]) >= coverage_thresh) and \
                   (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            # keep only the first whitespace token so the label matches
            # the query id reported in the blast output
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Align the sequences of a FASTA file with MAFFT.

    Parameters
    ----------
    seqs_fp: string
        filepath to the FASTA file to align
    threads: integer, optional
        number of threads to use; 0 means all available threads

    Returns
    -------
    msa_fp : str
        name of the output alignment file, or None on empty input/failure
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # mafft denotes "all threads" by -1, not 0
    if threads == 0:
        threads = -1
    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    msa_fp = seqs_fp + '.msa'
    cmd = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
           '--thread', str(threads), seqs_fp]
    # mafft writes the alignment to stdout; redirect it into msa_fp
    _, serr, ret = _system_call(cmd, stdoutfilename=msa_fp)
    if ret != 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % serr)
        return None
    return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory into which the chimera-free output file is written
        (previously mis-documented as ``output_fp``)
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    output_fp : string
        the chimera removed fasta file name; returned even if vsearch
        reported an error (best-effort behavior, errors are logged)
    """
    logger = logging.getLogger(__name__)
    # fixed: the original message was missing the space between the file
    # name and 'to working dir'
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s '
                'to working dir %s' % (seqs_fp, working_dir))

    output_fp = join(
        working_dir, "%s.no_chimeras" % basename(seqs_fp))

    # we use the parameters dn=0.000001, xn=1000, minh=10000000
    # so 1 mismatch in the A/B region will cancel it being labeled as chimera
    # and ~3 unique reads in each region will make it a chimera if
    # no mismatches
    params = ['vsearch', '--uchime_denovo', seqs_fp,
              '--nonchimeras', output_fp,
              '-dn', '0.000001', '-xn', '1000',
              '-minh', '10000000', '--mindiffs', '5',
              '--fasta_width', '0', '--threads', str(threads)]
    sout, serr, res = _system_call(params)
    if not res == 0:
        # log the failure but still return the output path, matching the
        # original best-effort behavior
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % sout)
        logger.debug('stderr : %s' % serr)
    return output_fp
def sample_id_from_read_id(readid):
    """Extract the sample id from a split_libraries_fastq.py read header.

    Headers look like '<sampleid>_<readnumber> <rest of header>'; the
    sample id itself may contain underscores, so only the last
    underscore-separated field of the first token is dropped.

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # first space-delimited token is '<sampleid>_<readnumber>'
    return readid.split(' ')[0].rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Demultiplex a FASTA file into one output file per sample id.

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))

    # one output handle per sample id, opened lazily on first read
    handles = {}
    for header, sequence in sequence_generator(seqs):
        sample = sample_id_from_read_id(header)
        if sample not in handles:
            handles[sample] = open(join(outdir, sample + '.fasta'), 'w')
        handles[sample].write(">%s\n%s\n" % (header, sequence))

    for handle in handles.values():
        handle.close()
    logger.info('split to %d files' % len(handles))
def write_biom_table(table, biom_fp):
    """Write BIOM table to file.

    Parameters
    ----------
    table: biom.table
        an instance of a BIOM table
    biom_fp: string
        filepath to output BIOM table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    # serialize as HDF5, tagging deblur as the generator
    with biom_open(biom_fp, 'w') as out_fh:
        table.to_hdf5(h5grp=out_fh, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """Collect the deblurred fasta files to add to the output table.

    Parameters
    ----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the suffix shared by all fasta files to be added to the table

    Returns
    -------
    names : list of tuples of (string, string)
        (fasta file path, sample id) for every matching file; the sample
        id is the base name with the suffix and one trailing extension
        (e.g. '.fasta') removed
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))
    names = []
    for cur_fp in glob(join(input_dir, "*%s" % file_end)):
        if isfile(cur_fp):
            # strip the suffix, then one remaining extension
            stem = basename(cur_fp)[:-len(file_end)]
            names.append((cur_fp, os.path.splitext(stem)[0]))
    logger.debug('found %d files' % len(names))
    return names
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string so '\w' is not treated as a string escape)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}   # sequence -> row index into obs / seqlist
    seqlist = []   # row index -> sequence
    sampset = set()   # sample ids seen so far (fast membership test)
    samplist = []     # sample ids in column order
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        # strip a trailing fasta/fastq-style extension from the sample id
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # bugfix: interpolate the sample id into the warning message;
            # previously the literal '%s' placeholder was passed through
            # unformatted (warnings.warn does no %-formatting itself)
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file (observation ids are the sequences themselves)
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file.

    Pipeline: trim -> dereplicate -> artifact removal -> MSA -> deblur ->
    chimera removal, each step writing an intermediate file in working_dir.

    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for deblurring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length (-1 disables trimming in trim_seqs)
    left_trim_length: integer
        number of bases to trim from the 5' end (0 disables)
    min_size: integer
        upper limit on sequence abundance (discard sequences below limit)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)

    Returns
    -------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras, or None if an error
        was encountered at any step
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    # (reads shorter than trim_length are dropped by trim_seqs)
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts
    # negate=True: reads aligning to ref_fp are discarded
    # (see remove_artifacts_seqs); NOTE(review): confirm ref_fp here is
    # the artifacts (negative) database, as the negate flag implies
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        # multiple_sequence_alignment writes to output_artif_fp + '.msa',
        # which is the same path computed here (both live in working_dir)
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            # NOTE(review): logger.warn is a deprecated alias of
            # logger.warning
            logger.warn('no sequences returned from deblur for file %s' %
                        output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """Initialize file logging for the deblur run.

    Parameters
    ----------
    level : int, optional
        logging level (e.g. logging.DEBUG, logging.INFO; between 0-50)
    filename : str, optional
        name of the log file, or None (default) to use
        deblur.log.TIMESTAMP
    """
    if filename is None:
        # timestamped default name, with spaces/colons made filename-safe
        stamp = time.ctime().replace(' ', '.').replace(':', '.')
        filename = 'deblur.log.%s' % stamp
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exist status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
|
biocore/deblur | deblur/workflow.py | remove_artifacts_seqs | python | def remove_artifacts_seqs(seqs_fp,
ref_fp,
working_dir,
ref_db_fp,
negate=False,
threads=1,
verbose=False,
sim_thresh=None,
coverage_thresh=None):
logger = logging.getLogger(__name__)
logger.info('remove_artifacts_seqs file %s' % seqs_fp)
if stat(seqs_fp).st_size == 0:
logger.warn('file %s has size 0, continuing' % seqs_fp)
return None, 0, []
if coverage_thresh is None:
if negate:
coverage_thresh = 0.95 * 100
else:
coverage_thresh = 0.5 * 100
if sim_thresh is None:
if negate:
sim_thresh = 0.95 * 100
else:
sim_thresh = 0.65 * 100
# the minimal average bitscore per nucleotide
bitscore_thresh = 0.65
output_fp = join(working_dir,
"%s.no_artifacts" % basename(seqs_fp))
blast_output = join(working_dir,
'%s.sortmerna' % basename(seqs_fp))
aligned_seq_ids = set()
for i, db in enumerate(ref_fp):
logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
% (db, working_dir, ref_db_fp[i], seqs_fp))
# run SortMeRNA
# we use -e 100 to remove E-value based filtering by sortmerna
# since we use bitscore/identity/coverage filtering instead
params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
(db, ref_db_fp[i]),
'--aligned', blast_output, '--blast', '3', '--best', '1',
'--print_all_reads', '-v', '-e', '100']
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('sortmerna error on file %s' % seqs_fp)
logger.error('stdout : %s' % sout)
logger.error('stderr : %s' % serr)
return output_fp, 0, []
blast_output_filename = '%s.blast' % blast_output
with open(blast_output_filename, 'r') as bfl:
for line in bfl:
line = line.strip().split('\t')
# if * means no match
if line[1] == '*':
continue
# check if % identity[2] and coverage[13] are large enough
if (float(line[2]) >= sim_thresh) and \
(float(line[13]) >= coverage_thresh) and \
(float(line[11]) >= bitscore_thresh * len(line[0])):
aligned_seq_ids.add(line[0])
if negate:
def op(x): return x not in aligned_seq_ids
else:
def op(x): return x in aligned_seq_ids
# if negate = False, only output sequences
# matching to at least one of the databases
totalseqs = 0
okseqs = 0
badseqs = 0
with open(output_fp, 'w') as out_f:
for label, seq in sequence_generator(seqs_fp):
totalseqs += 1
label = label.split()[0]
if op(label):
out_f.write(">%s\n%s\n" % (label, seq))
okseqs += 1
else:
badseqs += 1
logger.info('total sequences %d, passing sequences %d, '
'failing sequences %d' % (totalseqs, okseqs, badseqs))
return output_fp, okseqs, [blast_output_filename] | Remove artifacts from FASTA file using SortMeRNA.
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
ref_fp: tuple
file path(s) to FASTA database file
working_dir: string
working directory path
ref_db_fp: tuple
file path(s) to indexed FASTA database
negate: boolean, optional
if True, discard all input sequences aligning
to reference database
threads: integer, optional
number of threads to use for SortMeRNA
verbose: boolean, optional
If true, output SortMeRNA errors
sim_thresh: float, optional
The minimal similarity threshold (between 0 and 1)
for keeping the sequence
if None, the default values used are 0.65 for negate=False,
0.95 for negate=True
coverage_thresh: float, optional
The minimal coverage threshold (between 0 and 1)
for alignments for keeping the sequence
if None, the default values used are 0.5 for negate=False,
0.95 for negate=True
Returns
-------
output_fp : str
Name of the artifact removed fasta file
okseqs : int
The number of sequences left after artifact removal
tmp_files : list of str
Names of the tmp files created | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L366-L493 | [
"def sequence_generator(input_fp):\n \"\"\"Yield (id, sequence) from an input file\n\n Parameters\n ----------\n input_fp : filepath\n A filepath, which can be any valid fasta or fastq file within the\n limitations of scikit-bio's IO registry.\n\n Notes\n -----\n The use of this m... | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
# module-level format sniffers from scikit-bio's IO registry; each returns
# a (matched, kwargs) tuple and is used to detect FASTA vs FASTQ input
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Determine which FASTQ quality-score variant *input_fp* uses.

    Tries each known variant in turn and returns the first one that
    scikit-bio can parse a record with.

    Raises
    ------
    ValueError
        if none of the known variants can interpret the PHRED scores
    """
    # http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
    for candidate in ('illumina1.8', 'illumina1.3', 'solexa', 'sanger'):
        try:
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) pairs from a FASTA or FASTQ input.

    Parameters
    ----------
    input_fp : filepath
        A filepath (or readable text stream) holding any valid fasta or
        fastq content within the limitations of scikit-bio's IO registry.

    Notes
    -----
    Stopgap replacement for the old `parse_fasta` that also supports
    fastq. If the format cannot be determined (typically an empty fasta
    file) a UserWarning is emitted and nothing is yielded.

    Returns
    -------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    read_kwargs = {}
    if sniff_fasta(input_fp)[0]:
        fmt = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        fmt = 'fastq'
        read_kwargs['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty, so warn and
        # return no sequences
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        logger.warn(msg)
        warnings.warn(msg, UserWarning)
        return
    # some callers pass file paths, others StringIO objects; rewind a
    # stream that the sniffers already consumed
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=fmt, **read_kwargs):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.

    Sequences shorter than ``trim_len`` are dropped; ``trim_len == -1``
    disables trimming entirely (including the left trim).

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.

    Returns
    -------
    Generator of (str, str)
        The trimmed sequences in (label, sequence) format
    """
    logger = logging.getLogger(__name__)
    # counters for the number of kept and total sequences
    kept = 0
    total = 0
    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)
    for label, seq in input_seqs:
        total += 1
        if trim_len == -1:
            kept += 1
            yield label, seq
        elif len(seq) >= trim_len:
            kept += 1
            yield label, seq[left_trim_len:trim_len]
    if kept < 0.01 * total:
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (total - kept, total, trim_len)
        logger.warn(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, kept, total))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Dereplicate FASTA sequences and drop low-abundance ones via VSEARCH.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller than this
    use_log: boolean, optional
        if True, also save the vsearch logfile (to output_fp.log);
        default False
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)

    cmd = ['vsearch', '--derep_fulllength', seqs_fp,
           '--output', output_fp, '--sizeout',
           '--fasta_width', '0', '--minuniquesize', str(min_size),
           '--quiet', '--threads', str(threads)]
    if use_log:
        cmd.extend(['--log', "%s.log" % output_fp])

    stdout, stderr, exit_code = _system_call(cmd)
    if exit_code != 0:
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % cmd)
        logger.debug('stdout: %s' % stdout)
        logger.debug('stderr: %s' % stderr)
        return
def build_index_sortmerna(ref_fp, working_dir):
    """Build a SortMeRNA index for all reference databases.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database

    Returns
    -------
    all_db: list
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        if indexdb_rna fails for any of the databases
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    indexed = []
    for db_fp in ref_fp:
        # index file basename mirrors the fasta name without extension
        fasta_name = split(db_fp)[1]
        db_output = join(working_dir, splitext(fasta_name)[0])
        logger.debug('processing file %s into location %s' % (db_fp, db_output))
        cmd = ['indexdb_rna', '--ref', '%s,%s' %
               (db_fp, db_output), '--tmpdir', working_dir]
        stdout, stderr, exit_code = _system_call(cmd)
        if exit_code != 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (db_fp, db_output))
            logger.debug('stdout: %s' % stdout)
            logger.debug('stderr: %s' % stderr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % db_fp)
        logger.debug('file %s indexed' % db_fp)
        indexed.append(db_output)
    return indexed
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Remove samples whose total read count is below minreads.

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if False create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    totals = table.sum(axis='sample')
    sample_ids = table.ids(axis='sample')
    low_samples = sample_ids[totals < minreads]
    if len(low_samples) == 0:
        logger.debug('all samples contain > %d reads' % minreads)
        return table
    logger.warn('removed %d samples with reads per sample<%d'
                % (len(low_samples), minreads))
    # invert=True drops the listed (low-count) samples
    return table.filter(low_samples, axis='sample',
                        inplace=inplace, invert=True)
def fasta_from_biom(table, fasta_file_name):
    '''Save sequences from a biom table to a fasta file

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    # observation ids in a deblur table are the sequences themselves, so
    # each id serves as both the fasta header and the sequence body
    with open(fasta_file_name, 'w') as out_fh:
        out_fh.writelines('>%s\n%s\n' % (seq, seq)
                          for seq in table.ids(axis='observation'))
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA.

    Splits the input table into a reference-hit table (sequences aligning
    to at least one reference database) and a reference-non-hit table,
    writing both as biom and fasta files into biom_table_dir.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        file path(s) to the FASTA reference database(s)
    biom_table_dir : str
        directory into which the filtered biom/fasta outputs are written
    ref_db_fp : tuple
        file path(s) to the SortMeRNA-indexed reference database(s)
    threads : int, optional
        number of threads for SortMeRNA
    verbose : bool, optional
        passed through to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold (see remove_artifacts_seqs)
    coverage_thresh : float, optional
        minimal coverage threshold (see remove_artifacts_seqs)

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    # (num_seqs_left is unused here)
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file; the sequences themselves are the table's
    # observation ids
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact biom table (invert=True keeps only
    # observations NOT in good_seqs)
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s biom table (in place this time)
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Run MAFFT on a FASTA file and return the alignment file path.

    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads

    Returns
    -------
    str or None
        the output alignment file name, or None if the input is empty
        or mafft failed
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # for mafft we use -1 to denote all threads and not 0
    num_threads = -1 if threads == 0 else threads
    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    aln_fp = seqs_fp + '.msa'
    # the alignment arrives on mafft's stdout, redirected into aln_fp
    stdout, stderr, ret = _system_call(
        ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
         '--thread', str(num_threads), seqs_fp],
        stdoutfilename=aln_fp)
    if ret != 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % stderr)
        return None
    return aln_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """De novo chimera filtering via vsearch's uchime_denovo.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory where the chimera-free output file is written
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    str
        path of the chimera-removed fasta file (returned even if
        vsearch reported an error; failures are only logged)
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))
    nonchimera_fp = join(
        working_dir, "%s.no_chimeras" % basename(seqs_fp))
    # dn=0.000001, xn=1000, minh=10000000: a single mismatch in the A/B
    # region prevents a chimera call, while ~3 mismatch-free unique reads
    # per region are enough to trigger one
    cmd = ['vsearch', '--uchime_denovo', seqs_fp,
           '--nonchimeras', nonchimera_fp,
           '-dn', '0.000001', '-xn', '1000',
           '-minh', '10000000', '--mindiffs', '5',
           '--fasta_width', '0', '--threads', str(threads)]
    stdout, stderr, ret = _system_call(cmd)
    if ret != 0:
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % stdout)
        logger.debug('stderr : %s' % stderr)
    return nonchimera_fp
def sample_id_from_read_id(readid):
    """Recover the sample id from a split_libraries_fastq.py read header.

    The first space-delimited token has the form
    '<sampleid>_<readnumber>'; only the final underscore field is
    stripped, so sample ids containing underscores survive intact.

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    token = readid.partition(' ')[0]
    return token.rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Write each sample's reads from a demultiplexed FASTA to its own file.

    Parameters
    ----------
    seqs: file handler
        file handler to the demultiplexed FASTA file
    outdir: string
        directory for the per-sample FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))
    per_sample = {}
    for rec in sequence_generator(seqs):
        sid = sample_id_from_read_id(rec[0])
        out_fh = per_sample.get(sid)
        if out_fh is None:
            # first read for this sample: open its output file
            out_fh = open(join(outdir, sid + '.fasta'), 'w')
            per_sample[sid] = out_fh
        out_fh.write(">%s\n%s\n" % (rec[0], rec[1]))
    for out_fh in per_sample.values():
        out_fh.close()
    logger.info('split to %d files' % len(per_sample))
def write_biom_table(table, biom_fp):
    """Serialize a BIOM table to an HDF5 file.

    Parameters
    ----------
    table: biom.table
        the BIOM table to save
    biom_fp: string
        destination filepath
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    with biom_open(biom_fp, 'w') as h5_fh:
        table.to_hdf5(h5grp=h5_fh, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """List deblurred fasta files and their sample ids for table building.

    Parameters
    ----------
    input_dir : string
        directory containing the deblurred fasta files
    file_end : string
        suffix shared by all fasta files to include

    Returns
    -------
    list of tuples of (string, string)
        (file path, sample id) pairs; the sample id is the base name
        with the suffix and one trailing extension removed
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))
    pairs = []
    pattern = join(input_dir, "*%s" % file_end)
    for fp in glob(pattern):
        if not isfile(fp):
            continue
        stem = basename(fp)[:-len(file_end)]
        pairs.append((fp, os.path.splitext(stem)[0]))
    logger.debug('found %d files' % len(pairs))
    return pairs
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string so '\w' is not treated as a string escape)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}
    seqlist = []
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # bug fix: the sample id was never interpolated into the
            # warning message (the literal '%s' was emitted)
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file

    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for debluring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length
    left_trim_length: integer
        trim the first n reads
    min_size: integer
        upper limit on sequence abundance (discard sequences below limit)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.5 for negate=False, 0.95 for negate=True)

    Return
    ------
    output_no_chimers_fp : string
        filepath to fasta file with no chimeras of None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            # logger.warn is a deprecated alias of logger.warning
            logger.warning('no sequences returned from deblur for file %s' %
                           output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """Configure file logging for the run.

    Parameters
    ----------
    level : int, optional
        logging level (0-50, e.g. logging.DEBUG or logging.INFO)
    filename : str, optional
        log file name; when None, a timestamped name of the form
        'deblur.log.TIMESTAMP' is generated
    """
    if filename is None:
        # build a filesystem-friendly timestamp (no spaces or colons)
        stamp = time.ctime().replace(' ', '.').replace(':', '.')
        filename = 'deblur.log.%s' % stamp
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exist status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
|
biocore/deblur | deblur/workflow.py | multiple_sequence_alignment | python | def multiple_sequence_alignment(seqs_fp, threads=1):
logger = logging.getLogger(__name__)
logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
# for mafft we use -1 to denote all threads and not 0
if threads == 0:
threads = -1
if stat(seqs_fp).st_size == 0:
logger.warning('msa failed. file %s has no reads' % seqs_fp)
return None
msa_fp = seqs_fp + '.msa'
params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
'--thread', str(threads), seqs_fp]
sout, serr, res = _system_call(params, stdoutfilename=msa_fp)
if not res == 0:
logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
logger.debug('stderr : %s' % serr)
return None
return msa_fp | Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L496-L529 | [
"def _system_call(cmd, stdoutfilename=None):\n \"\"\"Execute the command `cmd`\n Parameters\n ----------\n cmd : str\n The string containing the command to be run.\n stdoutfilename : str\n Name of the file to save stdout to or None\n (default) to not save to file\n stderrfilen... | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
# Cached scikit-bio format sniffers; sequence_generator uses them to
# decide whether an input file should be parsed as FASTA or FASTQ.
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Probe which FASTQ PHRED variant ``input_fp`` is encoded with.

    Tries each known scikit-bio variant in turn and returns the first one
    that can successfully parse a record.
    See http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters

    Raises
    ------
    ValueError
        when no known variant can interpret the file
    """
    for candidate in ('illumina1.8', 'illumina1.3', 'solexa', 'sanger'):
        try:
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) from an input file

    Parameters
    ----------
    input_fp : filepath
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.

    Notes
    -----
    The use of this method is a stopgap to replicate the existing `parse_fasta`
    functionality while at the same time allowing for fastq support.

    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.

    Returns
    -------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    kw = {}
    # local renamed from `format` to avoid shadowing the builtin
    if sniff_fasta(input_fp)[0]:
        fmt = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        fmt = 'fastq'
        kw['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        # logger.warn is a deprecated alias of logger.warning
        logger.warning(msg)
        warnings.warn(msg, UserWarning)
        return
    # some of the test code is using file paths, some is using StringIO.
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=fmt, **kw):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.

    Raises
    ------
    ValueError
        If trim_len is smaller than -1.

    Returns
    -------
    Generator of (str, str)
        The trimmed sequences in (label, sequence) format
    """
    logger = logging.getLogger(__name__)
    # counters for the number of trimmed and total sequences
    okseqs = 0
    totseqs = 0

    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)

    for label, seq in input_seqs:
        totseqs += 1
        if trim_len == -1:
            # NOTE(review): when trimming is disabled, left_trim_len is
            # ignored as well -- confirm this is intended
            okseqs += 1
            yield label, seq
        elif len(seq) >= trim_len:
            okseqs += 1
            yield label, seq[left_trim_len:trim_len]

    if okseqs < 0.01*totseqs:
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (totseqs-okseqs, totseqs, trim_len)
        # logger.warn is a deprecated alias of logger.warning; the
        # redundant re-fetch of the module logger was also dropped
        logger.warning(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, okseqs, totseqs))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Dereplicate FASTA sequences and remove singletons using VSEARCH.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller
        than integer
    use_log: boolean, optional
        save the vsearch logfile as well (to output_fp.log)
        default=False
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)
    cmd = ['vsearch', '--derep_fulllength', seqs_fp,
           '--output', output_fp, '--sizeout',
           '--fasta_width', '0', '--minuniquesize', str(min_size),
           '--quiet', '--threads', str(threads)]
    if use_log:
        cmd.extend(['--log', "%s.log" % output_fp])
    sout, serr, res = _system_call(cmd)
    if res != 0:
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % cmd)
        logger.debug('stdout: %s' % sout)
        logger.debug('stderr: %s' % serr)
        return
def build_index_sortmerna(ref_fp, working_dir):
    """Build a SortMeRNA index for all reference databases.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database

    Returns
    -------
    all_db: tuple
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        when indexdb_rna fails for any of the databases
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    indexed = []
    for ref_fasta in ref_fp:
        # index basename = reference fasta filename without its extension
        stem = splitext(split(ref_fasta)[1])[0]
        out_base = join(working_dir, stem)
        logger.debug('processing file %s into location %s' % (ref_fasta,
                                                              out_base))
        cmd = ['indexdb_rna', '--ref', '%s,%s' %
               (ref_fasta, out_base), '--tmpdir', working_dir]
        sout, serr, res = _system_call(cmd)
        if res != 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (ref_fasta, out_base))
            logger.debug('stdout: %s' % sout)
            logger.debug('stderr: %s' % serr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % ref_fasta)
        logger.debug('file %s indexed' % ref_fasta)
        indexed.append(out_base)
    return indexed
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Filter samples from biom table that have less than
    minreads reads total

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    # sample ids whose total read count is below the threshold
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        # logger.warn is a deprecated alias of logger.warning
        logger.warning('removed %d samples with reads per sample<%d'
                       % (len(bad_samples), minreads))
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def fasta_from_biom(table, fasta_file_name):
    '''Save sequences from a biom table to a fasta file

    Each observation id (which is the sequence itself) is written both as
    the fasta header and as the sequence body.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    with open(fasta_file_name, 'w') as out:
        out.writelines('>%s\n%s\n' % (s, s)
                       for s in table.ids(axis='observation'))
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA

    Splits the table into a reference-hit and a reference-non-hit table
    (plus matching fasta files), all written into ``biom_table_dir``.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        file path(s) to FASTA reference database(s)
    biom_table_dir : str
        directory where the filtered tables and fasta files are written
    ref_db_fp : tuple
        file path(s) to SortMeRNA indexed reference database(s)
    threads : int, optional
        number of threads to use for SortMeRNA
    verbose : bool, optional
        passed through to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold (see remove_artifacts_seqs)
    coverage_thresh : float, optional
        minimal coverage threshold (see remove_artifacts_seqs)

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        # logger.warn is a deprecated alias of logger.warning
        logger.warning("No clean sequences in %s" % fasta_filename)
        return tmp_files

    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))

    # read the clean fasta file
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))

    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)

    # filter and save the artifact biom table
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)

    # filter and save the only 16s biom table
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-non-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)

    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True

    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)

    if stat(seqs_fp).st_size == 0:
        # logger.warn is a deprecated alias of logger.warning
        logger.warning('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []

    # thresholds are expressed in percent (0-100) internally
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100

    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100

    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65

    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']

        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []

        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                if (float(line[2]) >= sim_thresh) and \
                   (float(line[13]) >= coverage_thresh) and \
                   (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])

    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids

    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory where the chimera-free output file is written
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    output_fp : str
        the chimera removed fasta file name
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))

    output_fp = join(
        working_dir, "%s.no_chimeras" % basename(seqs_fp))

    # we use the parameters dn=0.000001, xn=1000, minh=10000000
    # so 1 mismatch in the A/B region will cancel it being labeled as chimera
    # and ~3 unique reads in each region will make it a chimera if
    # no mismatches
    cmd = ['vsearch', '--uchime_denovo', seqs_fp,
           '--nonchimeras', output_fp,
           '-dn', '0.000001', '-xn', '1000',
           '-minh', '10000000', '--mindiffs', '5',
           '--fasta_width', '0', '--threads', str(threads)]
    sout, serr, res = _system_call(cmd)
    if res != 0:
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % sout)
        logger.debug('stderr : %s' % serr)
    return output_fp
def sample_id_from_read_id(readid):
    """Get SampleID from the split_libraries_fastq.py output
    fasta file read header

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # header format is "<sampleid>_<readnum> <rest>": take the first
    # whitespace-free token, then drop the trailing _<readnum> part
    token = readid.split(' ')[0]
    return token.rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Split FASTA file on sample IDs.

    One output file per sample id is created in ``outdir``, named
    '<sampleid>.fasta'; output handles are opened lazily and all closed
    at the end.

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))

    handles = {}
    for header, seq in sequence_generator(seqs):
        sample = sample_id_from_read_id(header)
        if sample not in handles:
            handles[sample] = open(join(outdir, sample + '.fasta'), 'w')
        handles[sample].write(">%s\n%s\n" % (header, seq))
    for handle in handles.values():
        handle.close()

    logger.info('split to %d files' % len(handles))
def write_biom_table(table, biom_fp):
    """Serialize a BIOM table to an HDF5 file.

    Parameters
    ----------
    table: biom.table
        the BIOM table instance to serialize
    biom_fp: string
        destination filepath for the output BIOM file
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    with biom_open(biom_fp, 'w') as out_fh:
        table.to_hdf5(h5grp=out_fh, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """Collect the deblurred fasta files to merge into the output table.

    Parameters
    ----------
    input_dir : string
        directory containing the deblurred fasta files
    file_end : string
        filename suffix shared by all fasta files to be collected

    Returns
    -------
    names : list of tuples of (string, string)
        (filepath, sampleid) pairs; the sample id is the base filename
        with ``file_end`` and the remaining extension stripped
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))
    pattern = join(input_dir, "*%s" % file_end)
    names = []
    for path in glob(pattern):
        if isfile(path):
            # drop the suffix, then the leftover extension (e.g. '.fasta')
            stem = basename(path)[:-len(file_end)]
            names.append((path, os.path.splitext(stem)[0]))
    logger.debug('found %d files' % len(names))
    return names
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string so '\w' is not treated as a string escape)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}
    seqlist = []
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # bug fix: the sample id was never interpolated into the
            # warning message (the literal '%s' was emitted)
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
indel_prob, indel_max, trim_length, left_trim_length,
min_size, ref_fp, ref_db_fp, threads_per_sample=1,
sim_thresh=None, coverage_thresh=None):
"""Launch full deblur workflow for a single post split-libraries fasta file
Parameters
----------
seqs_fp: string
a post split library fasta file for debluring
working_dir: string
working directory path
mean_error: float
mean error for original sequence estimate
error_dist: list
list of error probabilities for each hamming distance
indel_prob: float
insertion/deletion (indel) probability
indel_max: integer
maximal indel number
trim_length: integer
sequence trim length
left_trim_length: integer
trim the first n reads
min_size: integer
upper limit on sequence abundance (discard sequences below limit)
ref_fp: tuple
filepath(s) to FASTA reference database for artifact removal
ref_db_fp: tuple
filepath(s) to SortMeRNA indexed database for artifact removal
threads_per_sample: integer, optional
number of threads to use for SortMeRNA/mafft/vsearch
(0 for max available)
sim_thresh: float, optional
the minimal similarity for a sequence to the database.
if None, take the defaults (0.65 for negate=False,
0.95 for negate=True)
coverage_thresh: float, optional
the minimal coverage for alignment of a sequence to the database.
if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)
Return
------
output_no_chimers_fp : string
filepath to fasta file with no chimeras of None if error encountered
"""
logger = logging.getLogger(__name__)
logger.info('--------------------------------------------------------')
logger.info('launch_workflow for file %s' % seqs_fp)
# Step 1: Trim sequences to specified length
output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
with open(output_trim_fp, 'w') as out_f:
for label, seq in trim_seqs(
input_seqs=sequence_generator(seqs_fp),
trim_len=trim_length,
left_trim_len=left_trim_length):
out_f.write(">%s\n%s\n" % (label, seq))
# Step 2: Dereplicate sequences
output_derep_fp = join(working_dir,
"%s.derep" % basename(output_trim_fp))
dereplicate_seqs(seqs_fp=output_trim_fp,
output_fp=output_derep_fp,
min_size=min_size, threads=threads_per_sample)
# Step 3: Remove artifacts
output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
ref_fp=ref_fp,
working_dir=working_dir,
ref_db_fp=ref_db_fp,
negate=True,
threads=threads_per_sample,
sim_thresh=sim_thresh)
if not output_artif_fp:
warnings.warn('Problem removing artifacts from file %s' %
seqs_fp, UserWarning)
logger.warning('remove artifacts failed, aborting')
return None
# Step 4: Multiple sequence alignment
if num_seqs_left > 1:
output_msa_fp = join(working_dir,
"%s.msa" % basename(output_artif_fp))
alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
threads=threads_per_sample)
if not alignment:
warnings.warn('Problem performing multiple sequence alignment '
'on file %s' % seqs_fp, UserWarning)
logger.warning('msa failed. aborting')
return None
elif num_seqs_left == 1:
# only one sequence after remove artifacts (but could be many reads)
# no need to run MSA - just use the pre-msa file as input for next step
output_msa_fp = output_artif_fp
else:
err_msg = ('No sequences left after artifact removal in '
'file %s' % seqs_fp)
warnings.warn(err_msg, UserWarning)
logger.warning(err_msg)
return None
# Step 5: Launch deblur
output_deblur_fp = join(working_dir,
"%s.deblur" % basename(output_msa_fp))
with open(output_deblur_fp, 'w') as f:
seqs = deblur(sequence_generator(output_msa_fp), mean_error,
error_dist, indel_prob, indel_max)
if seqs is None:
warnings.warn('multiple sequence alignment file %s contains '
'no sequences' % output_msa_fp, UserWarning)
logger.warn('no sequences returned from deblur for file %s' %
output_msa_fp)
return None
for s in seqs:
# remove '-' from aligned sequences
s.sequence = s.sequence.replace('-', '')
f.write(s.to_fasta())
# Step 6: Chimera removal
output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
output_deblur_fp, working_dir, threads=threads_per_sample)
logger.info('finished processing file')
return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
"""start the logger for the run
Parameters
----------
level : int, optional
logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
filename : str, optional
name of the filename to save the log to or
None (default) to use deblur.log.TIMESTAMP
"""
if filename is None:
tstr = time.ctime()
tstr = tstr.replace(' ', '.')
tstr = tstr.replace(':', '.')
filename = 'deblur.log.%s' % tstr
logging.basicConfig(filename=filename, level=level,
format='%(levelname)s(%(thread)d)'
'%(asctime)s:%(message)s')
logger = logging.getLogger(__name__)
logger.info('*************************')
logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exist status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
|
biocore/deblur | deblur/workflow.py | remove_chimeras_denovo_from_seqs | python | def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
logger = logging.getLogger(__name__)
logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
'to working dir %s' % (seqs_fp, working_dir))
output_fp = join(
working_dir, "%s.no_chimeras" % basename(seqs_fp))
# we use the parameters dn=0.000001, xn=1000, minh=10000000
# so 1 mismatch in the A/B region will cancel it being labeled as chimera
# and ~3 unique reads in each region will make it a chimera if
# no mismatches
params = ['vsearch', '--uchime_denovo', seqs_fp,
'--nonchimeras', output_fp,
'-dn', '0.000001', '-xn', '1000',
'-minh', '10000000', '--mindiffs', '5',
'--fasta_width', '0', '--threads', str(threads)]
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('problem with chimera removal for file %s' % seqs_fp)
logger.debug('stdout : %s' % sout)
logger.debug('stderr : %s' % serr)
return output_fp | Remove chimeras de novo using UCHIME (VSEARCH implementation).
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
output_fp: string
file path to store chimera-free results
threads : int
number of threads (0 for all cores)
Returns
-------
output_fp
the chimera removed fasta file name | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L532-L570 | [
"def _system_call(cmd, stdoutfilename=None):\n \"\"\"Execute the command `cmd`\n Parameters\n ----------\n cmd : str\n The string containing the command to be run.\n stdoutfilename : str\n Name of the file to save stdout to or None\n (default) to not save to file\n stderrfilen... | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Detect the FASTQ PHRED variant of ``input_fp`` by trial parsing.

    Each known scikit-bio variant is tried in turn; the first one that
    successfully parses a record wins.
    http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters

    Raises
    ------
    ValueError
        If no variant can parse the file.
    """
    for candidate in ('illumina1.8', 'illumina1.3', 'solexa', 'sanger'):
        try:
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            # parsing failed under this variant -- try the next one
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) from an input file
    Parameters
    ----------
    input_fp : filepath
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.
    Notes
    -----
    The use of this method is a stopgap to replicate the existing `parse_fasta`
    functionality while at the same time allowing for fastq support.
    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.
    Returns
    -------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    kw = {}
    # the sniffers return (matched, kwargs); index [0] is the boolean flag
    if sniff_fasta(input_fp)[0]:
        format = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        format = 'fastq'
        # fastq needs an explicit PHRED variant so skbio can decode qualities
        kw['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        logger.warn(msg)
        warnings.warn(msg, UserWarning)
        return
    # some of the test code is using file paths, some is using StringIO.
    # rewind open handles: the sniffers above consumed part of the stream
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=format, **kw):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.

    Returns
    -------
    Generator of (str, str)
        The trimmed sequences in (label, sequence) format
    """
    logger = logging.getLogger(__name__)
    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)
    # counters for the number of trimmed and total sequences
    kept = 0
    total = 0
    for label, seq in input_seqs:
        total += 1
        if trim_len == -1:
            # trimming disabled entirely -- pass the read through unchanged
            kept += 1
            yield label, seq
        elif len(seq) >= trim_len:
            # reads shorter than trim_len are dropped
            kept += 1
            yield label, seq[left_trim_len:trim_len]
    if kept < 0.01 * total:
        logger = logging.getLogger(__name__)
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (total - kept, total, trim_len)
        logger.warn(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, kept, total))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Dereplicate FASTA sequences and remove singletons using VSEARCH.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller
        than integer
    use_log : boolean, optional
        save the vsearch logfile as well (to output_fp.log)
        default=False
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)
    command = ['vsearch', '--derep_fulllength', seqs_fp,
               '--output', output_fp, '--sizeout',
               '--fasta_width', '0', '--minuniquesize', str(min_size),
               '--quiet', '--threads', str(threads)]
    if use_log:
        command += ['--log', "%s.log" % output_fp]
    sout, serr, res = _system_call(command)
    if res != 0:
        # vsearch failed -- keep full diagnostics in the debug log
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % command)
        logger.debug('stdout: %s' % sout)
        logger.debug('stderr: %s' % serr)
        return
def build_index_sortmerna(ref_fp, working_dir):
    """Build a SortMeRNA index for all reference databases.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database

    Returns
    -------
    all_db: tuple
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        If indexdb_rna fails for any database.
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    indexed = []
    for fasta_fp in ref_fp:
        _, fasta_name = split(fasta_fp)
        # index files live in working_dir, named after the fasta stem
        index_fp = join(working_dir, splitext(fasta_name)[0])
        logger.debug('processing file %s into location %s'
                     % (fasta_fp, index_fp))
        cmd = ['indexdb_rna', '--ref', '%s,%s' %
               (fasta_fp, index_fp), '--tmpdir', working_dir]
        sout, serr, res = cmd and _system_call(cmd)
        if res != 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (fasta_fp, index_fp))
            logger.debug('stdout: %s' % sout)
            logger.debug('stderr: %s' % serr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % fasta_fp)
        logger.debug('file %s indexed' % fasta_fp)
        indexed.append(index_fp)
    return indexed
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Filter samples from biom table that have less than
    minreads reads total
    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy
    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    # per-sample read totals and matching sample ids (numpy arrays)
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    # boolean mask selects samples whose total is below the threshold
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        logger.warn('removed %d samples with reads per sample<%d'
                    % (len(bad_samples), minreads))
        # invert=True keeps everything EXCEPT the listed bad samples
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def fasta_from_biom(table, fasta_file_name):
    '''Save sequences from a biom table to a fasta file

    Each observation id IS the sequence, so it is written as both the
    fasta header and the sequence line.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    with open(fasta_file_name, 'w') as out:
        out.writelines('>%s\n%s\n' % (seq, seq)
                       for seq in table.ids(axis='observation'))
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA

    Splits the table into a reference-hit table/fasta and a
    reference-non-hit table/fasta, both written into ``biom_table_dir``.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        filepath(s) to FASTA reference database(s)
    biom_table_dir : str
        directory where output tables/fasta files are written
    ref_db_fp : tuple
        filepath(s) to the SortMeRNA-indexed reference database(s)
    threads : int, optional
        number of threads for SortMeRNA
    verbose : bool, optional
        passed through to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold (see remove_artifacts_seqs)
    coverage_thresh : float, optional
        minimal coverage threshold (see remove_artifacts_seqs)

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        # input fasta was empty; nothing to split, return tmp files as-is
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact biom table
    # (invert=True: keep only observations NOT in good_seqs)
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s biom table (in place this time)
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-non-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.
    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True
    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    if stat(seqs_fp).st_size == 0:
        logger.warn('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # defaults are stricter for negate=True (positive filtering)
    # thresholds are expressed as percentages for the blast-format columns
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100
    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []
        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                # bitscore[11] is compared to the per-nucleotide threshold
                # scaled by the read-id length (line[0] is the read id)
                if (float(line[2]) >= sim_thresh) and \
                        (float(line[13]) >= coverage_thresh) and \
                        (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            # keep only the first whitespace-delimited token of the header,
            # matching the id format in the sortmerna blast output
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Perform multiple sequence alignment on FASTA file using MAFFT.

    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads

    Returns
    -------
    msa_fp : str
        name of output alignment file or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # for mafft we use -1 to denote all threads and not 0
    if threads == 0:
        threads = -1
    if stat(seqs_fp).st_size == 0:
        # nothing to align
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    msa_fp = seqs_fp + '.msa'
    mafft_cmd = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
                 '--thread', str(threads), seqs_fp]
    # mafft writes the alignment to stdout, which we redirect into msa_fp
    sout, serr, res = _system_call(mafft_cmd, stdoutfilename=msa_fp)
    if res != 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % serr)
        return None
    return msa_fp
def sample_id_from_read_id(readid):
    """Get SampleID from the split_libraries_fastq.py output
    fasta file read header

    Headers look like ``<sampleid>_<readnum> <extra fields>``; the sample
    id is everything before the last underscore of the first token.

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # first whitespace-delimited token is "<sampleid>_<readnum>"
    first_token = readid.split(' ')[0]
    # strip the trailing "_<readnum>" (sample ids may contain underscores)
    return first_token.rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Split FASTA file on sample IDs.

    One output file per sample id is created in ``outdir``, named
    ``<sampleid>.fasta``, each containing the reads of that sample.

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))
    handles = {}
    for header, seq in sequence_generator(seqs):
        sample = sample_id_from_read_id(header)
        fh = handles.get(sample)
        if fh is None:
            # first read for this sample -- open its output file lazily
            fh = open(join(outdir, sample + '.fasta'), 'w')
            handles[sample] = fh
        fh.write(">%s\n%s\n" % (header, seq))
    for fh in handles.values():
        fh.close()
    logger.info('split to %d files' % len(handles))
def write_biom_table(table, biom_fp):
    """Write BIOM table to file.
    Parameters
    ----------
    table: biom.table
        an instance of a BIOM table
    biom_fp: string
        filepath to output BIOM table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    # biom_open with 'w' yields an HDF5 group handle for to_hdf5
    with biom_open(biom_fp, 'w') as f:
        table.to_hdf5(h5grp=f, generated_by="deblur")
        logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """Get a list of files to add to the output table

    Parameters:
    -----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the ending of all the fasta files to be added to the table
        (default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')

    Returns
    -------
    names : list of tuples of (string,string)
        list of tuples of:
            name of fasta files to be added to the biom table
            sampleid (file names without the file_end and path)
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))
    matches = []
    for path in glob(join(input_dir, "*%s" % file_end)):
        if not isfile(path):
            # glob may match directories; only plain files are samples
            continue
        # strip the pipeline suffix, then the original fasta extension
        stem = basename(path)[:-len(file_end)]
        sample_id = os.path.splitext(stem)[0]
        matches.append((path, sample_id))
    logger.debug('found %d files' % len(matches))
    return matches
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string so the \w escape is explicit)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}
    seqlist = []
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)

    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]

        # test if sample has already been processed
        if csampleid in sampset:
            # BUGFIX: interpolate the sample id into the warning message
            # (previously the literal '%s' was emitted)
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq

    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))

    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')

    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')

    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)

    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)

    # and save the fasta file
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file

    Pipeline: trim -> dereplicate -> remove artifacts -> MSA -> deblur ->
    chimera removal. Each step writes its result into ``working_dir``.

    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for debluring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length
    left_trim_length: integer
        trim the first n reads
    min_size: integer
        minimal sequence abundance (discard sequences with abundance
        below this limit during dereplication)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.5 for negate=False, 0.95 for negate=True)

    Return
    ------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts
    # negate=True: discard reads that DO align to the artifact databases
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            logger.warn('no sequences returned from deblur for file %s' %
                        output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """start the logger for the run
    Parameters
    ----------
    level : int, optional
        logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
    filename : str, optional
        name of the filename to save the log to or
        None (default) to use deblur.log.TIMESTAMP
    """
    if filename is None:
        # build a filesystem-safe timestamped name: the spaces and colons
        # in time.ctime() output are replaced with dots
        tstr = time.ctime()
        tstr = tstr.replace(' ', '.')
        tstr = tstr.replace(':', '.')
        filename = 'deblur.log.%s' % tstr
    # NOTE(review): basicConfig only configures the root logger on first
    # use, so a second call to start_log keeps the original destination
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
    """Execute the command `cmd`
    Parameters
    ----------
    cmd : list of str
        The command and its arguments to be run (executed with
        shell=False, so no shell interpolation happens).
    stdoutfilename : str
        Name of the file to save stdout to or None
        (default) to not save to file. When a file is given, the
        stdout element of the returned tuple is None because stdout
        is streamed directly into the file.
    Returns
    -------
    tuple of (str, str, int)
        The standard output, standard error and exit status of the
        executed command
    Notes
    -----
    This function is ported and modified from QIIME
    (http://www.qiime.org), previously named
    qiime_system_call. QIIME is a GPL project, but we obtained permission from
    the authors of this function to port it to Qiita and keep it under BSD
    license.
    """
    logger = logging.getLogger(__name__)
    logger.debug('system call: %s' % cmd)
    if stdoutfilename:
        # stream stdout straight into the requested file
        with open(stdoutfilename, 'w') as f:
            proc = subprocess.Popen(cmd, universal_newlines=True,
                                    shell=False, stdout=f,
                                    stderr=subprocess.PIPE)
    else:
        proc = subprocess.Popen(cmd, universal_newlines=True,
                                shell=False, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    # Communicate pulls all stdout/stderr from the PIPEs
    # This call blocks until the command is done
    stdout, stderr = proc.communicate()
    return_value = proc.returncode
    return stdout, stderr, return_value
|
biocore/deblur | deblur/workflow.py | split_sequence_file_on_sample_ids_to_files | python | def split_sequence_file_on_sample_ids_to_files(seqs,
outdir):
logger = logging.getLogger(__name__)
logger.info('split_sequence_file_on_sample_ids_to_files'
' for file %s into dir %s' % (seqs, outdir))
outputs = {}
for bits in sequence_generator(seqs):
sample = sample_id_from_read_id(bits[0])
if sample not in outputs:
outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')
outputs[sample].write(">%s\n%s\n" % (bits[0], bits[1]))
for sample in outputs:
outputs[sample].close()
logger.info('split to %d files' % len(outputs)) | Split FASTA file on sample IDs.
Parameters
----------
seqs: file handler
file handler to demultiplexed FASTA file
outdir: string
dirpath to output split FASTA files | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L596-L621 | [
"def sequence_generator(input_fp):\n \"\"\"Yield (id, sequence) from an input file\n\n Parameters\n ----------\n input_fp : filepath\n A filepath, which can be any valid fasta or fastq file within the\n limitations of scikit-bio's IO registry.\n\n Notes\n -----\n The use of this m... | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Detect which PHRED encoding variant a FASTQ file uses.

    Each known variant is tried in turn by attempting to read a single
    record; the first one that parses cleanly wins.

    Raises
    ------
    ValueError
        if none of the known variants can decode the file
    """
    # candidate encodings, tried in order; see
    # http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
    for candidate in ('illumina1.8', 'illumina1.3', 'solexa', 'sanger'):
        try:
            # one record is enough to validate the quality offset
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) from an input file

    Parameters
    ----------
    input_fp : filepath
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.

    Notes
    -----
    The use of this method is a stopgap to replicate the existing `parse_fasta`
    functionality while at the same time allowing for fastq support.
    If the file is neither FASTA nor FASTQ a warning is emitted and the
    generator simply yields nothing.

    Returns
    -------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    kw = {}
    # fmt instead of format: avoid shadowing the builtin
    if sniff_fasta(input_fp)[0]:
        fmt = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        fmt = 'fastq'
        # fastq additionally needs the PHRED variant to decode qualities
        kw['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        logger.warning(msg)
        warnings.warn(msg, UserWarning)
        return
    # some of the test code is using file paths, some is using StringIO;
    # rewind streams because the sniffers above already consumed them
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=fmt, **kw):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming;
        sequences then pass through unchanged (left_trim_len is
        intentionally not applied in that case).
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.

    Yields
    ------
    (str, str)
        The trimmed sequences in (label, sequence) format. Sequences
        shorter than trim_len are dropped.

    Raises
    ------
    ValueError
        If trim_len is smaller than -1.
    """
    logger = logging.getLogger(__name__)
    # counters for the number of surviving and total sequences, used for
    # the sanity warning once the input is exhausted
    okseqs = 0
    totseqs = 0
    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)
    for label, seq in input_seqs:
        totseqs += 1
        if trim_len == -1:
            # trimming disabled: pass the sequence through unchanged
            okseqs += 1
            yield label, seq
        elif len(seq) >= trim_len:
            okseqs += 1
            yield label, seq[left_trim_len:trim_len]
    if okseqs < 0.01 * totseqs:
        # fewer than 1% of the reads survived - the trim length is very
        # likely wrong for this data set
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (totseqs - okseqs, totseqs, trim_len)
        logger.warning(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, okseqs, totseqs))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Dereplicate FASTA sequences and remove singletons using VSEARCH.
    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller
        than integer
    use_log: boolean, optional
        save the vsearch logfile as well (to output_fp.log)
        default=False
    threads : int, optional
        number of threads to use (0 for all available)
    Notes
    -----
    vsearch failures are logged but not raised; the function returns
    None either way, so callers cannot distinguish success from failure.
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)
    log_name = "%s.log" % output_fp
    # --sizeout annotates each output header with its abundance
    # ("size=N"), which create_otu_table later parses back out
    params = ['vsearch', '--derep_fulllength', seqs_fp,
              '--output', output_fp, '--sizeout',
              '--fasta_width', '0', '--minuniquesize', str(min_size),
              '--quiet', '--threads', str(threads)]
    if use_log:
        params.extend(['--log', log_name])
    sout, serr, res = _system_call(params)
    if not res == 0:
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % params)
        logger.debug('stdout: %s' % sout)
        logger.debug('stderr: %s' % serr)
        return
def build_index_sortmerna(ref_fp, working_dir):
    """Build a SortMeRNA index for all reference databases.
    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database
    Returns
    -------
    all_db: list
        filepaths to SortMeRNA indexed reference databases
    Raises
    ------
    RuntimeError
        if indexdb_rna fails for any of the reference databases
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    all_db = []
    for db in ref_fp:
        fasta_dir, fasta_filename = split(db)
        # the index is named after the fasta file, minus its extension
        index_basename = splitext(fasta_filename)[0]
        db_output = join(working_dir, index_basename)
        logger.debug('processing file %s into location %s' % (db, db_output))
        params = ['indexdb_rna', '--ref', '%s,%s' %
                  (db, db_output), '--tmpdir', working_dir]
        sout, serr, res = _system_call(params)
        if not res == 0:
            # indexing failure is fatal - downstream steps need every db
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (db, db_output))
            logger.debug('stdout: %s' % sout)
            logger.debug('stderr: %s' % serr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % db)
        logger.debug('file %s indexed' % db)
        all_db.append(db_output)
    return all_db
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Filter samples from biom table that have less than
    minreads reads total

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    # samples whose total read count falls below the threshold
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        logger.warning('removed %d samples with reads per sample<%d'
                       % (len(bad_samples), minreads))
        # invert=True keeps everything *except* the low-read samples
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def fasta_from_biom(table, fasta_file_name):
    """Write the observation ids of a biom table out as a FASTA file.

    Each observation id is itself the sequence, so every record uses
    the sequence both as its header and as its body.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file (overwritten if it exists)
    """
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    records = ('>%s\n%s\n' % (seq, seq)
               for seq in table.ids(axis='observation'))
    with open(fasta_file_name, 'w') as out:
        out.writelines(records)
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA.

    Splits the table into a reference-hit part and a reference-non-hit
    part, writing both as biom tables plus matching fasta files into
    biom_table_dir.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        filepath(s) to FASTA reference databases
    biom_table_dir : str
        directory into which the output biom/fasta files are written
    ref_db_fp : tuple
        filepath(s) to the SortMeRNA-indexed reference databases
    threads : int, optional
        number of threads for SortMeRNA
    verbose : bool, optional
        passed through to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold (see remove_artifacts_seqs)
    coverage_thresh : float, optional
        minimal coverage threshold (see remove_artifacts_seqs)

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(
        fasta_filename, ref_fp,
        working_dir=biom_table_dir,
        ref_db_fp=ref_db_fp,
        negate=False, threads=threads,
        verbose=verbose,
        sim_thresh=sim_thresh,
        coverage_thresh=coverage_thresh)
    if clean_fp is None:
        logger.warning("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact (reference-non-hit) biom table
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s (reference-hit) biom table
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True

    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    if stat(seqs_fp).st_size == 0:
        logger.warning('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # thresholds are expressed as percentages (hence * 100); stricter
    # defaults are used when negating (keeping the non-matching reads)
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100
    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    # loop-invariant: also guards against a NameError at the final
    # return when ref_fp is empty
    blast_output_filename = '%s.blast' % blast_output
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                if (float(line[2]) >= sim_thresh) and \
                        (float(line[13]) >= coverage_thresh) and \
                        (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Perform multiple sequence alignment on FASTA file using MAFFT.
    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads
    Returns
    -------
    msa_fp : str
        name of output alignment file or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # for mafft we use -1 to denote all threads and not 0
    if threads == 0:
        threads = -1
    if stat(seqs_fp).st_size == 0:
        # empty input - nothing to align
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    msa_fp = seqs_fp + '.msa'
    params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
              '--thread', str(threads), seqs_fp]
    # mafft writes the alignment to stdout; capture it into msa_fp
    sout, serr, res = _system_call(params, stdoutfilename=msa_fp)
    if not res == 0:
        # a single-sequence input is a common benign cause of failure,
        # so this is logged at info rather than error level
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % serr)
        return None
    return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).
    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory in which the chimera-free output file is created
    threads : int
        number of threads (0 for all cores)
    Returns
    -------
    output_fp
        the chimera removed fasta file name
    Notes
    -----
    vsearch failures are logged but not raised; on failure output_fp
    may be missing or incomplete.
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))
    output_fp = join(
        working_dir, "%s.no_chimeras" % basename(seqs_fp))
    # we use the parameters dn=0.000001, xn=1000, minh=10000000
    # so 1 mismatch in the A/B region will cancel it being labeled as chimera
    # and ~3 unique reads in each region will make it a chimera if
    # no mismatches
    params = ['vsearch', '--uchime_denovo', seqs_fp,
              '--nonchimeras', output_fp,
              '-dn', '0.000001', '-xn', '1000',
              '-minh', '10000000', '--mindiffs', '5',
              '--fasta_width', '0', '--threads', str(threads)]
    sout, serr, res = _system_call(params)
    if not res == 0:
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % sout)
        logger.debug('stderr : %s' % serr)
    return output_fp
def sample_id_from_read_id(readid):
    """Get SampleID from the split_libraries_fastq.py output
    fasta file read header.

    Headers look like "<sampleid>_<readnum> <extra info>": the sample
    id is everything before the last underscore of the first
    whitespace-delimited token.

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # drop everything after the first space, then strip the
    # trailing "_<readnum>" suffix
    first_token = readid.split(' ')[0]
    return first_token.rsplit('_', 1)[0]
def write_biom_table(table, biom_fp):
    """Write BIOM table to file.

    The table is serialized in HDF5 format with "deblur" recorded as
    the generating tool.

    Parameters
    ----------
    table: biom.table
        an instance of a BIOM table
    biom_fp: string
        filepath to output BIOM table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    with biom_open(biom_fp, 'w') as f:
        table.to_hdf5(h5grp=f, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """Get a list of files to add to the output table.

    Parameters:
    -----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the ending of all the fasta files to be added to the table
        (default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')

    Returns
    -------
    names : list of tuples of (string,string)
        list of tuples of:
            name of fasta files to be added to the biom table
            sampleid (file names without the file_end and path)
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))

    def _sample_id(path):
        # strip the deblur suffix, then the original fasta extension
        stem = basename(path)[:-len(file_end)]
        return os.path.splitext(stem)[0]

    candidates = glob(join(input_dir, "*%s" % file_end))
    # directories matching the pattern are skipped via isfile
    names = [(cfile, _sample_id(cfile))
             for cfile in candidates if isfile(cfile)]
    logger.debug('found %d files' % len(names))
    return names
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string: \w inside a plain literal is an invalid escape)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}
    seqlist = []
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # interpolate the sample id into the warning message
            # (previously the literal '%s' was emitted)
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file

    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for debluring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length
    left_trim_length: integer
        trim the first n reads
    min_size: integer
        upper limit on sequence abundance (discard sequences below limit)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.5 for negate=False, 0.95 for negate=True)

    Return
    ------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts
    # coverage_thresh is now forwarded (previously accepted but ignored)
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(
        seqs_fp=output_derep_fp,
        ref_fp=ref_fp,
        working_dir=working_dir,
        ref_db_fp=ref_db_fp,
        negate=True,
        threads=threads_per_sample,
        sim_thresh=sim_thresh,
        coverage_thresh=coverage_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            logger.warning('no sequences returned from deblur for file %s' %
                           output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """start the logger for the run
    Parameters
    ----------
    level : int, optional
        logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
    filename : str, optional
        name of the filename to save the log to or
        None (default) to use deblur.log.TIMESTAMP
    """
    if filename is None:
        # build a filesystem-safe timestamped name: the spaces and colons
        # in time.ctime() output are replaced with dots
        tstr = time.ctime()
        tstr = tstr.replace(' ', '.')
        tstr = tstr.replace(':', '.')
        filename = 'deblur.log.%s' % tstr
    # NOTE(review): basicConfig only configures the root logger on first
    # use, so a second call to start_log keeps the original destination
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exist status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
|
biocore/deblur | deblur/workflow.py | write_biom_table | python | def write_biom_table(table, biom_fp):
logger = logging.getLogger(__name__)
logger.debug('write_biom_table to file %s' % biom_fp)
with biom_open(biom_fp, 'w') as f:
table.to_hdf5(h5grp=f, generated_by="deblur")
logger.debug('wrote to BIOM file %s' % biom_fp) | Write BIOM table to file.
Parameters
----------
table: biom.table
an instance of a BIOM table
biom_fp: string
filepath to output BIOM table | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L624-L638 | null | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Return the first PHRED encoding variant that parses input_fp.

    Raises
    ------
    ValueError
        if none of the known variants can decode the file
    """
    # http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
    variant = None
    variants = ['illumina1.8', 'illumina1.3', 'solexa', 'sanger']
    for v in variants:
        try:
            # reading a single record is enough to validate the encoding
            next(skbio.read(input_fp, format='fastq', variant=v))
        except Exception:
            continue
        else:
            variant = v
            break
    if variant is None:
        raise ValueError("Unknown variant, unable to interpret PHRED")
    return variant
def sequence_generator(input_fp):
    """Yield (id, sequence) from an input file

    Parameters
    ----------
    input_fp : filepath
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.

    Notes
    -----
    The use of this method is a stopgap to replicate the existing
    `parse_fasta` functionality while at the same time allowing for fastq
    support.

    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.

    Yields
    ------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    kw = {}
    if sniff_fasta(input_fp)[0]:
        format = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        format = 'fastq'
        # the PHRED offset cannot be sniffed; probe the known variants
        kw['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        # logger.warn is a deprecated alias of logger.warning
        logger.warning(msg)
        warnings.warn(msg, UserWarning)
        return
    # some of the test code is using file paths, some is using StringIO.
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=format, **kw):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.

    Yields
    ------
    (str, str)
        The trimmed sequences in (label, sequence) format

    Raises
    ------
    ValueError
        If trim_len is smaller than -1.
    """
    logger = logging.getLogger(__name__)
    # counters for the number of kept and total sequences
    okseqs = 0
    totseqs = 0
    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)
    for label, seq in input_seqs:
        totseqs += 1
        if trim_len == -1:
            # trimming disabled
            # NOTE(review): left_trim_len is ignored in this branch --
            # confirm this is the intended behavior
            okseqs += 1
            yield label, seq
        elif len(seq) >= trim_len:
            # sequences shorter than trim_len are silently dropped
            okseqs += 1
            yield label, seq[left_trim_len:trim_len]
    # warn if more than 99% of the reads were shorter than the trim length
    if okseqs < 0.01 * totseqs:
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (totseqs - okseqs, totseqs, trim_len)
        # logger.warn is a deprecated alias of logger.warning
        logger.warning(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, okseqs, totseqs))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Dereplicate FASTA sequences and remove singletons using VSEARCH.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller
        than integer
    use_log: boolean, optional
        save the vsearch logfile as well (to output_fp.log)
        default=False
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)
    # assemble the vsearch command; --sizeout records per-sequence abundance
    command = ['vsearch', '--derep_fulllength', seqs_fp,
               '--output', output_fp, '--sizeout',
               '--fasta_width', '0', '--minuniquesize', str(min_size),
               '--quiet', '--threads', str(threads)]
    if use_log:
        command.extend(['--log', "%s.log" % output_fp])
    sout, serr, res = _system_call(command)
    if res != 0:
        # vsearch failed; log the details and bail out quietly
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % command)
        logger.debug('stdout: %s' % sout)
        logger.debug('stderr: %s' % serr)
    return
def build_index_sortmerna(ref_fp, working_dir):
    """Build a SortMeRNA index for all reference databases.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database

    Returns
    -------
    all_db: tuple
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        If indexdb_rna exits with a non-zero status for any database.
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    indexed = []
    for fasta_fp in ref_fp:
        # index basename mirrors the fasta file name minus its extension
        base = splitext(split(fasta_fp)[1])[0]
        db_output = join(working_dir, base)
        logger.debug('processing file %s into location %s' % (fasta_fp, db_output))
        cmd = ['indexdb_rna', '--ref', '%s,%s' %
               (fasta_fp, db_output), '--tmpdir', working_dir]
        sout, serr, res = _system_call(cmd)
        if res != 0:
            # a failed index makes the downstream filtering impossible
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (fasta_fp, db_output))
            logger.debug('stdout: %s' % sout)
            logger.debug('stderr: %s' % serr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % fasta_fp)
        logger.debug('file %s indexed' % fasta_fp)
        indexed.append(db_output)
    return indexed
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Filter samples from biom table that have less than
    minreads reads total

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    # samples whose total read count falls below the threshold
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        # logger.warn is a deprecated alias of logger.warning
        logger.warning('removed %d samples with reads per sample<%d'
                       % (len(bad_samples), minreads))
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def fasta_from_biom(table, fasta_file_name):
    '''Save sequences from a biom table to a fasta file

    The observation ids ARE the sequences, so each record is written
    with the sequence as both header and body.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    records = ['>%s\n%s\n' % (seq, seq)
               for seq in table.ids(axis='observation')]
    with open(fasta_file_name, 'w') as out:
        out.writelines(records)
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA

    Splits the table into 'reference-hit' (sequences matching at least one
    reference database) and 'reference-non-hit' parts, writing a biom table
    and a fasta file for each into biom_table_dir.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple of str
        filepath(s) to FASTA reference database(s)
    biom_table_dir : str
        directory into which the filtered tables/fasta files are written
    ref_db_fp : tuple of str
        filepath(s) to SortMeRNA indexed reference database(s)
    threads : int, optional
        number of threads, passed to remove_artifacts_seqs
    verbose : bool, optional
        passed to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold, see remove_artifacts_seqs
    coverage_thresh : float, optional
        minimal coverage threshold, see remove_artifacts_seqs

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        # NOTE(review): logger.warn is a deprecated alias of logger.warning
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file; observation ids in the table are sequences
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact (non-hit) biom table; invert=True keeps
    # only observations NOT in good_seqs
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter (in place this time) and save the 16s-only biom table
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # the cleaned fasta file is also temporary; mark it for removal
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True

    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    if stat(seqs_fp).st_size == 0:
        # empty input: nothing to filter
        # NOTE(review): logger.warn is a deprecated alias of logger.warning
        logger.warn('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # thresholds are expressed as percentages for SortMeRNA, hence * 100
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100
    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []
        # parse the per-database blast-format output; ids that pass all
        # three thresholds accumulate across databases
        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                # bitscore[11] is compared against bitscore_thresh per base
                if (float(line[2]) >= sim_thresh) and \
                        (float(line[13]) >= coverage_thresh) and \
                        (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            # the blast output only carries the first whitespace token
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Perform multiple sequence alignment on FASTA file using MAFFT.

    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads

    Returns
    -------
    msa_fp : str
        name of output alignment file or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # mafft denotes "use all threads" with -1 rather than 0
    threads = -1 if threads == 0 else threads
    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    msa_fp = seqs_fp + '.msa'
    cmd = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
           '--thread', str(threads), seqs_fp]
    sout, serr, res = _system_call(cmd, stdoutfilename=msa_fp)
    if res != 0:
        # a single-read input is the usual cause of a mafft failure
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % serr)
        return None
    return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory in which the chimera-free output file is created
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    output_fp
        the chimera removed fasta file name
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))
    output_fp = join(working_dir, "%s.no_chimeras" % basename(seqs_fp))
    # we use the parameters dn=0.000001, xn=1000, minh=10000000
    # so 1 mismatch in the A/B region will cancel it being labeled as chimera
    # and ~3 unique reads in each region will make it a chimera if
    # no mismatches
    cmd = ['vsearch', '--uchime_denovo', seqs_fp,
           '--nonchimeras', output_fp,
           '-dn', '0.000001', '-xn', '1000',
           '-minh', '10000000', '--mindiffs', '5',
           '--fasta_width', '0', '--threads', str(threads)]
    sout, serr, res = _system_call(cmd)
    if res != 0:
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % sout)
        logger.debug('stderr : %s' % serr)
    return output_fp
def sample_id_from_read_id(readid):
    """Get SampleID from the split_libraries_fastq.py output
    fasta file read header

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # the first whitespace token is "<sampleid>_<readnumber>";
    # strip the trailing read number to recover the sample id
    first_field = readid.split(' ')[0]
    return first_field.rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Split FASTA file on sample IDs.

    Writes one fasta file per sample, named <sampleid>.fasta, into outdir.

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))
    outputs = {}
    try:
        for header, seq in sequence_generator(seqs):
            sample = sample_id_from_read_id(header)
            if sample not in outputs:
                outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')
            outputs[sample].write(">%s\n%s\n" % (header, seq))
    finally:
        # close every per-sample handle even if parsing fails mid-stream,
        # so we never leak open file descriptors
        for fh in outputs.values():
            fh.close()
    logger.info('split to %d files' % len(outputs))
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """Get a list of files to add to the output table

    Parameters:
    -----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the ending of all the fasta files to be added to the table
        (default '.trim.derep.no_artifacts.msa.deblur.no_chimeras')

    Returns
    -------
    names : list of tuples of (string,string)
        list of tuples of:
            name of fasta files to be added to the biom table
            sampleid (file names without the file_end and path)
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))
    names = []
    pattern = join(input_dir, "*%s" % file_end)
    for cfile in glob(pattern):
        if not isfile(cfile):
            continue
        # sample id = file name minus the deblur suffix, minus the extension
        stripped = basename(cfile)[:-len(file_end)]
        names.append((cfile, os.path.splitext(stripped)[0]))
    logger.debug('found %d files' % len(names))
    return names
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # NOTE(review): should be a raw string (r'(?<=size=)\w+') to avoid the
    # invalid-escape-sequence warning on modern Python
    sizeregexp = re.compile('(?<=size=)\w+')
    seqdict = {}
    seqlist = []
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        # strip a trailing fasta/fastq extension from the sample id
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # NOTE(review): the '%s' is never substituted here --
            # warnings.warn() does not apply %-formatting to its message
            warnings.warn('sample %s already in table!', UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            # per-sequence abundance is encoded as "size=N" in the header
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file (observation ids ARE the sequences)
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file

    Pipeline: trim -> dereplicate -> artifact removal -> MSA -> deblur ->
    chimera removal. Each step writes an intermediate file in working_dir.

    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for debluring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length
    left_trim_length: integer
        trim the first n reads
    min_size: integer
        upper limit on sequence abundance (discard sequences below limit)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)

    Return
    ------
    output_no_chimers_fp : string
        filepath to fasta file with no chimeras of None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts (negate=True keeps sequences that DO match
    # the positive reference here)
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            # NOTE(review): logger.warn is a deprecated alias of logger.warning
            logger.warn('no sequences returned from deblur for file %s' %
                        output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """start the logger for the run

    Parameters
    ----------
    level : int, optional
        logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
    filename : str, optional
        name of the filename to save the log to or
        None (default) to use deblur.log.TIMESTAMP
    """
    if filename is None:
        # timestamped default name; dots replace the spaces/colons of ctime()
        stamp = time.ctime().replace(' ', '.').replace(':', '.')
        filename = 'deblur.log.%s' % stamp
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exist status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
|
biocore/deblur | deblur/workflow.py | get_files_for_table | python | def get_files_for_table(input_dir,
file_end='.trim.derep.no_artifacts'
'.msa.deblur.no_chimeras'):
logger = logging.getLogger(__name__)
logger.debug('get_files_for_table input dir %s, '
'file-ending %s' % (input_dir, file_end))
names = []
for cfile in glob(join(input_dir, "*%s" % file_end)):
if not isfile(cfile):
continue
sample_id = basename(cfile)[:-len(file_end)]
sample_id = os.path.splitext(sample_id)[0]
names.append((cfile, sample_id))
logger.debug('found %d files' % len(names))
return names | Get a list of files to add to the output table
Parameters:
-----------
input_dir : string
name of the directory containing the deblurred fasta files
file_end : string
the ending of all the fasta files to be added to the table
(default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')
Returns
-------
names : list of tuples of (string,string)
list of tuples of:
name of fasta files to be added to the biom table
sampleid (file names without the file_end and path) | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L641-L673 | null | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Detect the PHRED encoding variant of a FASTQ file.

    Probes each scikit-bio FASTQ variant in turn and returns the first
    one under which the file parses.

    Parameters
    ----------
    input_fp : filepath
        FASTQ file to probe.

    Returns
    -------
    str
        The detected variant name.

    Raises
    ------
    ValueError
        If no known variant can parse the file.
    """
    # http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
    for candidate in ('illumina1.8', 'illumina1.3', 'solexa', 'sanger'):
        try:
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            # this variant failed to parse; try the next one
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) from an input file

    Parameters
    ----------
    input_fp : filepath
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.

    Notes
    -----
    The use of this method is a stopgap to replicate the existing
    `parse_fasta` functionality while at the same time allowing for fastq
    support.

    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.

    Yields
    ------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    kw = {}
    if sniff_fasta(input_fp)[0]:
        format = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        format = 'fastq'
        # the PHRED offset cannot be sniffed; probe the known variants
        kw['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        # logger.warn is a deprecated alias of logger.warning
        logger.warning(msg)
        warnings.warn(msg, UserWarning)
        return
    # some of the test code is using file paths, some is using StringIO.
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=format, **kw):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.

    Yields
    ------
    (str, str)
        The trimmed sequences in (label, sequence) format

    Raises
    ------
    ValueError
        If trim_len is smaller than -1.
    """
    logger = logging.getLogger(__name__)
    # counters for the number of kept and total sequences
    okseqs = 0
    totseqs = 0
    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)
    for label, seq in input_seqs:
        totseqs += 1
        if trim_len == -1:
            # trimming disabled
            # NOTE(review): left_trim_len is ignored in this branch --
            # confirm this is the intended behavior
            okseqs += 1
            yield label, seq
        elif len(seq) >= trim_len:
            # sequences shorter than trim_len are silently dropped
            okseqs += 1
            yield label, seq[left_trim_len:trim_len]
    # warn if more than 99% of the reads were shorter than the trim length
    if okseqs < 0.01 * totseqs:
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (totseqs - okseqs, totseqs, trim_len)
        # logger.warn is a deprecated alias of logger.warning
        logger.warning(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, okseqs, totseqs))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Dereplicate FASTA sequences and remove singletons using VSEARCH.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller
        than integer
    use_log: boolean, optional
        save the vsearch logfile as well (to output_fp.log)
        default=False
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)
    # assemble the vsearch command; --sizeout records per-sequence abundance
    command = ['vsearch', '--derep_fulllength', seqs_fp,
               '--output', output_fp, '--sizeout',
               '--fasta_width', '0', '--minuniquesize', str(min_size),
               '--quiet', '--threads', str(threads)]
    if use_log:
        command.extend(['--log', "%s.log" % output_fp])
    sout, serr, res = _system_call(command)
    if res != 0:
        # vsearch failed; log the details and bail out quietly
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % command)
        logger.debug('stdout: %s' % sout)
        logger.debug('stderr: %s' % serr)
    return
def build_index_sortmerna(ref_fp, working_dir):
    """Index every reference FASTA database with SortMeRNA.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database

    Returns
    -------
    all_db: tuple
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        If indexdb_rna fails for any of the databases.
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    indexed = []
    for ref_db in ref_fp:
        # index file basename mirrors the fasta file name, minus extension
        db_name = splitext(split(ref_db)[1])[0]
        db_output = join(working_dir, db_name)
        logger.debug('processing file %s into location %s' % (ref_db, db_output))
        cmd = ['indexdb_rna', '--ref', '%s,%s' % (ref_db, db_output),
               '--tmpdir', working_dir]
        stdout, stderr, exit_code = _system_call(cmd)
        if exit_code != 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (ref_db, db_output))
            logger.debug('stdout: %s' % stdout)
            logger.debug('stderr: %s' % stderr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % ref_db)
        logger.debug('file %s indexed' % ref_db)
        indexed.append(db_output)
    return indexed
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Filter samples from biom table that have less than
    minreads reads total

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    # samples whose total read count falls below the threshold
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        # logger.warning is the non-deprecated spelling of logger.warn
        logger.warning('removed %d samples with reads per sample<%d'
                       % (len(bad_samples), minreads))
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def fasta_from_biom(table, fasta_file_name):
    '''Write the sequences of a biom table to a fasta file.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    # observation ids are the sequences themselves, so each id serves as
    # both the fasta header and the sequence body
    records = ('>%s\n%s\n' % (seq_id, seq_id)
               for seq_id in table.ids(axis='observation'))
    with open(fasta_file_name, 'w') as out_handle:
        out_handle.writelines(records)
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA

    Splits the input table into a reference-hit table/fasta pair and a
    reference-non-hit table/fasta pair, both written into biom_table_dir.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        filepath(s) to FASTA reference database(s)
    biom_table_dir : str
        directory into which the output biom tables and fasta files are
        written
    ref_db_fp : tuple
        filepath(s) to the indexed reference database(s)
    threads : int, optional
        number of threads passed to remove_artifacts_seqs
    verbose : bool, optional
        passed to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold, passed to remove_artifacts_seqs
    coverage_thresh : float, optional
        minimal coverage threshold, passed to remove_artifacts_seqs

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        # nothing survived the filter (or the input was empty)
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact biom table
    # (invert=True keeps only observations NOT in good_seqs)
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s biom table
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True

    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    if stat(seqs_fp).st_size == 0:
        # nothing to do for an empty input file
        logger.warn('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # thresholds are percentages (0-100) from here on, hence the * 100
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100
    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []
        # parse the tabular blast output written for this database;
        # note the file is overwritten on each loop iteration
        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                if (float(line[2]) >= sim_thresh) and \
                        (float(line[13]) >= coverage_thresh) and \
                        (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    # choose the keep-predicate depending on the negate flag
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            # sortmerna reports only the first whitespace-delimited field
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Align the sequences of a FASTA file with MAFFT.

    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads

    Returns
    -------
    msa_fp : str
        name of output alignment file or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # mafft denotes "use all threads" with -1 rather than 0
    if threads == 0:
        threads = -1
    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    msa_fp = '%s.msa' % seqs_fp
    cmd = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
           '--thread', str(threads), seqs_fp]
    _, stderr, exit_code = _system_call(cmd, stdoutfilename=msa_fp)
    if exit_code != 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % stderr)
        return None
    return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """De novo chimera removal via the VSEARCH implementation of UCHIME.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory in which to store the chimera-free result file
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    output_fp
        the chimera removed fasta file name
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))
    output_fp = join(working_dir, "%s.no_chimeras" % basename(seqs_fp))
    # the parameters dn=0.000001, xn=1000, minh=10000000 mean a single
    # mismatch in the A/B region cancels the chimera label, while ~3 unique
    # reads in each region with no mismatches are enough to flag one
    cmd = ['vsearch', '--uchime_denovo', seqs_fp,
           '--nonchimeras', output_fp,
           '-dn', '0.000001', '-xn', '1000',
           '-minh', '10000000', '--mindiffs', '5',
           '--fasta_width', '0', '--threads', str(threads)]
    stdout, stderr, exit_code = _system_call(cmd)
    if exit_code != 0:
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % stdout)
        logger.debug('stderr : %s' % stderr)
    return output_fp
def sample_id_from_read_id(readid):
    """Extract the sample id from a split_libraries_fastq.py
    fasta read header.

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # header format is "<sampleid>_<readnum> <description...>":
    # drop the description, then strip the trailing read counter
    first_field = readid.split(' ')[0]
    return first_field.rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Split FASTA file on sample IDs.

    Writes one fasta file per sample id (<outdir>/<sampleid>.fasta), with
    the sample id parsed from each read header via sample_id_from_read_id.

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))
    outputs = {}
    try:
        for label, seq in sequence_generator(seqs):
            sample = sample_id_from_read_id(label)
            # lazily open one output file per sample id
            if sample not in outputs:
                outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')
            outputs[sample].write(">%s\n%s\n" % (label, seq))
    finally:
        # close every handle even if reading/parsing fails midway,
        # so partially-written files are not leaked
        for out_f in outputs.values():
            out_f.close()
    logger.info('split to %d files' % len(outputs))
def write_biom_table(table, biom_fp):
    """Serialize a BIOM table to an HDF5 file.

    Parameters
    ----------
    table: biom.table
        an instance of a BIOM table
    biom_fp: string
        filepath to output BIOM table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    with biom_open(biom_fp, 'w') as out_f:
        table.to_hdf5(h5grp=out_f, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string so the \w escape is not deprecated by the parser)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}
    seqlist = []
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # BUG FIX: the message must be %-formatted before the call;
            # previously UserWarning filled the second positional slot and
            # the '%s' placeholder was never interpolated
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file

    The pipeline is: trim -> dereplicate -> remove artifacts -> MSA ->
    deblur -> de novo chimera removal. All intermediate files are written
    into working_dir.

    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for debluring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length
    left_trim_length: integer
        trim the first n reads
    min_size: integer
        upper limit on sequence abundance (discard sequences below limit)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)

    Returns
    -------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts
    # note negate=True here: keep only sequences that DO align to the
    # positive reference database(s)
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            logger.warn('no sequences returned from deblur for file %s' %
                        output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """start the logger for the run

    Parameters
    ----------
    level : int, optional
        logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
    filename : str, optional
        name of the filename to save the log to or
        None (default) to use deblur.log.TIMESTAMP
    """
    if filename is None:
        # timestamped default name, with spaces/colons made filename-safe
        timestamp = time.ctime().replace(' ', '.').replace(':', '.')
        filename = 'deblur.log.%s' % timestamp
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
    """Execute the external command `cmd` and capture its output.

    Parameters
    ----------
    cmd : list of str
        The command and its arguments to be run (passed with shell=False).
    stdoutfilename : str, optional
        Name of the file to redirect stdout to, or None
        (default) to capture it in memory.

    Returns
    -------
    tuple of (str, str, int)
        The standard output, standard error and exit status of the
        executed command

    Notes
    -----
    This function is ported and modified from QIIME
    (http://www.qiime.org), previously named
    qiime_system_call. QIIME is a GPL project, but we obtained permission from
    the authors of this function to port it to Qiita and keep it under BSD
    license.
    """
    logger = logging.getLogger(__name__)
    logger.debug('system call: %s' % cmd)
    if not stdoutfilename:
        proc = subprocess.Popen(cmd, universal_newlines=True,
                                shell=False, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    else:
        with open(stdoutfilename, 'w') as out_handle:
            proc = subprocess.Popen(cmd, universal_newlines=True,
                                    shell=False, stdout=out_handle,
                                    stderr=subprocess.PIPE)
    # communicate blocks until the child exits and drains the PIPEs
    stdout, stderr = proc.communicate()
    return stdout, stderr, proc.returncode
|
biocore/deblur | deblur/workflow.py | create_otu_table | python | def create_otu_table(output_fp, deblurred_list,
outputfasta_fp=None, minreads=0):
logger = logging.getLogger(__name__)
logger.info('create_otu_table for %d samples, '
'into output table %s' % (len(deblurred_list), output_fp))
# the regexp for finding the number of reads of a sequence
sizeregexp = re.compile('(?<=size=)\w+')
seqdict = {}
seqlist = []
sampset = set()
samplist = []
# arbitrary size for the sparse results matrix so we won't run out of space
obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
# load the sequences from all samples into a sprase matrix
sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
for (cfilename, csampleid) in deblurred_list:
if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
csampleid = csampleid.rsplit('.', 1)[0]
# test if sample has already been processed
if csampleid in sampset:
warnings.warn('sample %s already in table!', UserWarning)
logger.error('sample %s already in table!' % csampleid)
continue
sampset.add(csampleid)
samplist.append(csampleid)
csampidx = len(sampset)-1
# read the fasta file and add to the matrix
for chead, cseq in sequence_generator(cfilename):
cseq = cseq.upper()
if cseq not in seqdict:
seqdict[cseq] = len(seqlist)
seqlist.append(cseq)
cseqidx = seqdict[cseq]
cfreq = float(sizeregexp.search(chead).group(0))
try:
obs[cseqidx, csampidx] += cfreq
except IndexError:
# exception means we ran out of space - add more OTUs
shape = obs.shape
obs.resize((shape[0]*2, shape[1]))
obs[cseqidx, csampidx] = cfreq
logger.info('for output biom table loaded %d samples, %d unique sequences'
% (len(samplist), len(seqlist)))
# and now make the sparse matrix the real size
obs.resize((len(seqlist), len(samplist)))
# do the minimal reads per otu filtering
if minreads > 0:
readsperotu = obs.sum(axis=1)
keep = np.where(readsperotu >= minreads)[0]
logger.info('keeping %d (out of %d sequences) with >=%d reads' %
(len(keep), len(seqlist), minreads))
obs = obs[keep, :]
seqlist = list(np.array(seqlist)[keep])
logger.debug('filtering completed')
# convert the matrix to a biom table
table = Table(obs.tocsr(), seqlist, samplist,
observation_metadata=None,
sample_metadata=None, table_id=None,
generated_by="deblur",
create_date=datetime.now().isoformat())
logger.debug('converted to biom table')
# remove samples with 0 reads
filter_minreads_samples_from_table(table)
# save the merged otu table
write_biom_table(table, output_fp)
logger.info('saved to biom file %s' % output_fp)
# and save the fasta file
if outputfasta_fp is not None:
logger.debug('saving fasta file')
with open(outputfasta_fp, 'w') as f:
for cseq in seqlist:
f.write('>%s\n%s\n' % (cseq, cseq))
logger.info('saved sequence fasta file to %s' % outputfasta_fp) | Create a biom table out of all files in a directory
Parameters
----------
output_fp : string
filepath to output BIOM table
deblurred_list : list of (str, str)
list of file names (including path), sampleid of all deblurred
fasta files to add to the table
outputfasta_fp : str, optional
name of output fasta file (of all sequences in the table) or None
to not write
minreads : int, optional
minimal number of reads per bacterial sequence in order to write
it to the biom table and fasta file or 0 to write all | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L676-L774 | [
"def sequence_generator(input_fp):\n \"\"\"Yield (id, sequence) from an input file\n\n Parameters\n ----------\n input_fp : filepath\n A filepath, which can be any valid fasta or fastq file within the\n limitations of scikit-bio's IO registry.\n\n Notes\n -----\n The use of this m... | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
# format sniffers resolved once at import time; calling one with a filepath
# or handle returns a tuple whose first element is truthy when the input
# matches the format (see usage in sequence_generator)
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Detect the PHRED encoding variant of a fastq file.

    Tries each known scikit-bio fastq variant in turn and returns the
    first one that can successfully decode a record.

    Raises
    ------
    ValueError
        If none of the known variants can interpret the file.
    """
    # http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
    for candidate in ['illumina1.8', 'illumina1.3', 'solexa', 'sanger']:
        try:
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            # this variant cannot decode the file; try the next one
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) from an input file

    Parameters
    ----------
    input_fp : filepath
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.

    Notes
    -----
    The use of this method is a stopgap to replicate the existing `parse_fasta`
    functionality while at the same time allowing for fastq support.

    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.

    Returns
    -------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    # extra keyword args for skbio.read (only the fastq variant for now)
    kw = {}
    if sniff_fasta(input_fp)[0]:
        format = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        format = 'fastq'
        kw['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        logger.warn(msg)
        warnings.warn(msg, UserWarning)
        return
    # some of the test code is using file paths, some is using StringIO.
    # rewind handles because the sniffers above consumed the stream
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=format, **kw):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.

    Returns
    -------
    Generator of (str, str)
        The trimmed sequences in (label, sequence) format

    Raises
    ------
    ValueError
        If trim_len is smaller than -1.
    """
    # counters for the number of trimmed and total sequences
    logger = logging.getLogger(__name__)
    okseqs = 0
    totseqs = 0
    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)
    for label, seq in input_seqs:
        totseqs += 1
        if trim_len == -1:
            # trimming disabled - pass the sequence through unchanged
            okseqs += 1
            yield label, seq
        elif len(seq) >= trim_len:
            okseqs += 1
            yield label, seq[left_trim_len:trim_len]
        # sequences shorter than trim_len are silently dropped; the counters
        # below surface this as a warning when nearly everything is dropped
    if okseqs < 0.01*totseqs:
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (totseqs-okseqs, totseqs, trim_len)
        # logger.warning is the non-deprecated spelling of logger.warn
        logger.warning(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, okseqs, totseqs))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Collapse identical FASTA sequences and drop rare ones using VSEARCH.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller
        than integer
    use_log: boolean, optional
        save the vsearch logfile as well (to output_fp.log)
        default=False
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)
    cmd = ['vsearch', '--derep_fulllength', seqs_fp,
           '--output', output_fp, '--sizeout',
           '--fasta_width', '0', '--minuniquesize', str(min_size),
           '--quiet', '--threads', str(threads)]
    if use_log:
        # mirror vsearch's own log next to the output file
        cmd.extend(['--log', '%s.log' % output_fp])
    stdout, stderr, exit_code = _system_call(cmd)
    if exit_code != 0:
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % cmd)
        logger.debug('stdout: %s' % stdout)
        logger.debug('stderr: %s' % stderr)
        return
def build_index_sortmerna(ref_fp, working_dir):
    """Index every reference FASTA database with SortMeRNA.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database

    Returns
    -------
    all_db: tuple
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        If indexdb_rna fails for any of the databases.
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    indexed = []
    for ref_db in ref_fp:
        # index file basename mirrors the fasta file name, minus extension
        db_name = splitext(split(ref_db)[1])[0]
        db_output = join(working_dir, db_name)
        logger.debug('processing file %s into location %s' % (ref_db, db_output))
        cmd = ['indexdb_rna', '--ref', '%s,%s' % (ref_db, db_output),
               '--tmpdir', working_dir]
        stdout, stderr, exit_code = _system_call(cmd)
        if exit_code != 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (ref_db, db_output))
            logger.debug('stdout: %s' % stdout)
            logger.debug('stderr: %s' % stderr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % ref_db)
        logger.debug('file %s indexed' % ref_db)
        indexed.append(db_output)
    return indexed
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Filter samples from biom table that have less than
    minreads reads total

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    # samples whose total read count falls below the threshold
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        # logger.warning is the non-deprecated spelling of logger.warn
        logger.warning('removed %d samples with reads per sample<%d'
                       % (len(bad_samples), minreads))
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def fasta_from_biom(table, fasta_file_name):
    '''Write the observation sequences of a biom table to a fasta file.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    # observation ids ARE the sequences, so each record uses the sequence
    # both as the fasta header and as the body
    with open(fasta_file_name, 'w') as out:
        out.writelines('>%s\n%s\n' % (seq, seq)
                       for seq in table.ids(axis='observation'))
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA.

    Splits the input table into a reference-hit table/fasta (sequences that
    aligned to the reference databases) and a reference-non-hit table/fasta,
    all written into ``biom_table_dir``.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        filepath(s) to FASTA reference database(s)
    biom_table_dir : str
        directory where the output biom tables and fasta files are written
    ref_db_fp : tuple
        filepath(s) to the SortMeRNA indexed reference database(s)
    threads : int, optional
        number of threads passed to remove_artifacts_seqs
    verbose : bool, optional
        passed through to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold; None uses remove_artifacts_seqs defaults
    coverage_thresh : float, optional
        minimal coverage threshold; None uses remove_artifacts_seqs defaults

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        # nothing survived filtering; only report the temp files for cleanup
        # NOTE(review): logger.warn is deprecated; should be logger.warning
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file; headers equal sequences for biom tables
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact biom table (invert=True keeps non-hits)
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s biom table (in-place this time)
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True

    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)

    if stat(seqs_fp).st_size == 0:
        # logger.warn is deprecated (removed in Python 3.13); use warning
        logger.warning('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []

    # thresholds are percentages (sortmerna blast columns are in percent)
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100
    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100

    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65

    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']

        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []

        # NOTE: each database run overwrites the same .blast file; only the
        # last filename is reported in tmp_files (ids accumulate in the set)
        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                if (float(line[2]) >= sim_thresh) and \
                   (float(line[13]) >= coverage_thresh) and \
                   (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])

    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids

    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            # sortmerna reports only the first whitespace token of the header
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Perform multiple sequence alignment on FASTA file using MAFFT.

    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads

    Returns
    -------
    msa_fp : str
        name of output alignment file or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)

    # mafft denotes "all threads" with -1 rather than 0
    if threads == 0:
        threads = -1

    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None

    msa_fp = seqs_fp + '.msa'
    cmd = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
           '--thread', str(threads), seqs_fp]
    _, err_text, exit_code = _system_call(cmd, stdoutfilename=msa_fp)
    if exit_code != 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % err_text)
        return None
    return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory where the chimera-free output file is written
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    output_fp
        the chimera removed fasta file name
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))

    output_fp = join(
        working_dir, "%s.no_chimeras" % basename(seqs_fp))

    # with dn=0.000001, xn=1000, minh=10000000 a single mismatch in the A/B
    # region cancels the chimera label, while ~3 unique reads in each region
    # with no mismatches are enough to call a chimera
    cmd = ['vsearch', '--uchime_denovo', seqs_fp,
           '--nonchimeras', output_fp,
           '-dn', '0.000001', '-xn', '1000',
           '-minh', '10000000', '--mindiffs', '5',
           '--fasta_width', '0', '--threads', str(threads)]
    out_text, err_text, exit_code = _system_call(cmd)
    if exit_code != 0:
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % out_text)
        logger.debug('stderr : %s' % err_text)
    return output_fp
def sample_id_from_read_id(readid):
    """Get SampleID from the split_libraries_fastq.py output
    fasta file read header

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # header format is "<sampleid>_<counter> <extra fields...>":
    # drop the extra fields, then strip the trailing read counter
    return readid.split(' ')[0].rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Split FASTA file on sample IDs.

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))

    # one lazily-opened output handle per sample id
    open_handles = {}
    for header, sequence in sequence_generator(seqs):
        sample = sample_id_from_read_id(header)
        handle = open_handles.get(sample)
        if handle is None:
            handle = open(join(outdir, sample + '.fasta'), 'w')
            open_handles[sample] = handle
        handle.write(">%s\n%s\n" % (header, sequence))

    for handle in open_handles.values():
        handle.close()

    logger.info('split to %d files' % len(open_handles))
def write_biom_table(table, biom_fp):
    """Write BIOM table to file.

    Parameters
    ----------
    table: biom.table
        an instance of a BIOM table
    biom_fp: string
        filepath to output BIOM table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    # serialize as HDF5, stamping deblur as the generator
    with biom_open(biom_fp, 'w') as out_f:
        table.to_hdf5(h5grp=out_f, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """Get a list of files to add to the output table

    Parameters:
    -----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the ending of all the fasta files to be added to the table
        (default '.trim.derep.no_artifacts.msa.deblur.no_chimeras')

    Returns
    -------
    names : list of tuples of (string,string)
        list of tuples of:
            name of fasta files to be added to the biom table
            sampleid (file names without the file_end and path)
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))
    names = []
    for cfile in glob(join(input_dir, "*%s" % file_end)):
        if not isfile(cfile):
            continue
        # strip the path and the known suffix, then any remaining extension
        # (e.g. '.fasta'), leaving just the sample id
        sample_id = basename(cfile)[:-len(file_end)]
        sample_id = os.path.splitext(sample_id)[0]
        names.append((cfile, sample_id))
    logger.debug('found %d files' % len(names))
    return names
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file

    Runs the six-step pipeline: trim -> dereplicate -> artifact removal ->
    multiple sequence alignment -> deblur -> de novo chimera removal.
    Each step writes an intermediate file into ``working_dir``.

    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for debluring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length
    left_trim_length: integer
        trim the first n reads
    min_size: integer
        upper limit on sequence abundance (discard sequences below limit)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.5 for negate=False, 0.95 for negate=True)

    Return
    ------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras of None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts
    # negate=True: keep only reads that do NOT align to the reference dbs
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            # NOTE(review): logger.warn is deprecated; should be logger.warning
            logger.warn('no sequences returned from deblur for file %s' %
                        output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """start the logger for the run

    Parameters
    ----------
    level : int, optional
        logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
    filename : str, optional
        name of the filename to save the log to or
        None (default) to use deblur.log.TIMESTAMP
    """
    if filename is None:
        # derive a filesystem-friendly name from the current wall-clock time
        stamp = time.ctime().replace(' ', '.').replace(':', '.')
        filename = 'deblur.log.%s' % stamp
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exist status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
|
biocore/deblur | deblur/workflow.py | launch_workflow | python | def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
indel_prob, indel_max, trim_length, left_trim_length,
min_size, ref_fp, ref_db_fp, threads_per_sample=1,
sim_thresh=None, coverage_thresh=None):
logger = logging.getLogger(__name__)
logger.info('--------------------------------------------------------')
logger.info('launch_workflow for file %s' % seqs_fp)
# Step 1: Trim sequences to specified length
output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
with open(output_trim_fp, 'w') as out_f:
for label, seq in trim_seqs(
input_seqs=sequence_generator(seqs_fp),
trim_len=trim_length,
left_trim_len=left_trim_length):
out_f.write(">%s\n%s\n" % (label, seq))
# Step 2: Dereplicate sequences
output_derep_fp = join(working_dir,
"%s.derep" % basename(output_trim_fp))
dereplicate_seqs(seqs_fp=output_trim_fp,
output_fp=output_derep_fp,
min_size=min_size, threads=threads_per_sample)
# Step 3: Remove artifacts
output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
ref_fp=ref_fp,
working_dir=working_dir,
ref_db_fp=ref_db_fp,
negate=True,
threads=threads_per_sample,
sim_thresh=sim_thresh)
if not output_artif_fp:
warnings.warn('Problem removing artifacts from file %s' %
seqs_fp, UserWarning)
logger.warning('remove artifacts failed, aborting')
return None
# Step 4: Multiple sequence alignment
if num_seqs_left > 1:
output_msa_fp = join(working_dir,
"%s.msa" % basename(output_artif_fp))
alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
threads=threads_per_sample)
if not alignment:
warnings.warn('Problem performing multiple sequence alignment '
'on file %s' % seqs_fp, UserWarning)
logger.warning('msa failed. aborting')
return None
elif num_seqs_left == 1:
# only one sequence after remove artifacts (but could be many reads)
# no need to run MSA - just use the pre-msa file as input for next step
output_msa_fp = output_artif_fp
else:
err_msg = ('No sequences left after artifact removal in '
'file %s' % seqs_fp)
warnings.warn(err_msg, UserWarning)
logger.warning(err_msg)
return None
# Step 5: Launch deblur
output_deblur_fp = join(working_dir,
"%s.deblur" % basename(output_msa_fp))
with open(output_deblur_fp, 'w') as f:
seqs = deblur(sequence_generator(output_msa_fp), mean_error,
error_dist, indel_prob, indel_max)
if seqs is None:
warnings.warn('multiple sequence alignment file %s contains '
'no sequences' % output_msa_fp, UserWarning)
logger.warn('no sequences returned from deblur for file %s' %
output_msa_fp)
return None
for s in seqs:
# remove '-' from aligned sequences
s.sequence = s.sequence.replace('-', '')
f.write(s.to_fasta())
# Step 6: Chimera removal
output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
output_deblur_fp, working_dir, threads=threads_per_sample)
logger.info('finished processing file')
return output_no_chimeras_fp | Launch full deblur workflow for a single post split-libraries fasta file
Parameters
----------
seqs_fp: string
a post split library fasta file for debluring
working_dir: string
working directory path
mean_error: float
mean error for original sequence estimate
error_dist: list
list of error probabilities for each hamming distance
indel_prob: float
insertion/deletion (indel) probability
indel_max: integer
maximal indel number
trim_length: integer
sequence trim length
left_trim_length: integer
trim the first n reads
min_size: integer
upper limit on sequence abundance (discard sequences below limit)
ref_fp: tuple
filepath(s) to FASTA reference database for artifact removal
ref_db_fp: tuple
filepath(s) to SortMeRNA indexed database for artifact removal
threads_per_sample: integer, optional
number of threads to use for SortMeRNA/mafft/vsearch
(0 for max available)
sim_thresh: float, optional
the minimal similarity for a sequence to the database.
if None, take the defaults (0.65 for negate=False,
0.95 for negate=True)
coverage_thresh: float, optional
the minimal coverage for alignment of a sequence to the database.
if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)
Return
------
output_no_chimers_fp : string
filepath to fasta file with no chimeras of None if error encountered | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L777-L895 | [
"def deblur(input_seqs, mean_error=0.005,\n error_dist=None,\n indel_prob=0.01, indel_max=3):\n \"\"\"Deblur the reads\n\n Parameters\n ----------\n input_seqs : iterable of (str, str)\n The list of input sequences in (label, sequence) format. The label\n should include... | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
# module-level handles to scikit-bio's registered format sniffers; used by
# sequence_generator, which reads element [0] of the result as the match flag
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Return the PHRED variant of a fastq file by trying each known encoding.

    Raises ValueError if no known variant can parse the file.
    """
    # http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
    for candidate in ('illumina1.8', 'illumina1.3', 'solexa', 'sanger'):
        try:
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) from an input file

    Parameters
    ----------
    input_fp : filepath
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.

    Notes
    -----
    The use of this method is a stopgap to replicate the existing `parse_fasta`
    functionality while at the same time allowing for fastq support.

    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.

    Returns
    -------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    kw = {}
    # renamed local from `format` to avoid shadowing the builtin
    if sniff_fasta(input_fp)[0]:
        fmt = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        fmt = 'fastq'
        # fastq needs an explicit PHRED variant for scikit-bio to decode
        kw['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        # logger.warn is deprecated (removed in Python 3.13); use warning
        logger.warning(msg)
        warnings.warn(msg, UserWarning)
        return

    # some of the test code is using file paths, some is using StringIO.
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)

    for record in skbio.read(input_fp, format=fmt, **kw):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.

    Returns
    -------
    Generator of (str, str)
        The trimmed sequences in (label, sequence) format

    Raises
    ------
    ValueError
        If trim_len is smaller than -1.
    """
    logger = logging.getLogger(__name__)
    # counters for the number of kept and total sequences
    okseqs = 0
    totseqs = 0

    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)

    for label, seq in input_seqs:
        totseqs += 1

        if trim_len == -1:
            # trimming disabled; note left_trim_len is ignored in this case
            okseqs += 1
            yield label, seq
        elif len(seq) >= trim_len:
            okseqs += 1
            yield label, seq[left_trim_len:trim_len]

    if okseqs < 0.01 * totseqs:
        # almost everything was shorter than trim_len - likely a bad -t value
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (totseqs - okseqs, totseqs, trim_len)
        # logger.warn is deprecated (removed in Python 3.13); use warning
        logger.warning(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, okseqs, totseqs))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Dereplicate FASTA sequences and remove singletons using VSEARCH.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller
        than integer
    use_log: boolean, optional
        save the vsearch logfile as well (to output_fp.log)
        default=False
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)

    log_name = "%s.log" % output_fp
    cmd = ['vsearch', '--derep_fulllength', seqs_fp,
           '--output', output_fp, '--sizeout',
           '--fasta_width', '0', '--minuniquesize', str(min_size),
           '--quiet', '--threads', str(threads)]
    if use_log:
        cmd += ['--log', log_name]
    cmd_out, cmd_err, exit_code = _system_call(cmd)
    if exit_code != 0:
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % cmd)
        logger.debug('stdout: %s' % cmd_out)
        logger.debug('stderr: %s' % cmd_err)
        return
def build_index_sortmerna(ref_fp, working_dir):
    """Build a SortMeRNA index for all reference databases.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database

    Returns
    -------
    all_db: tuple
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        If indexdb_rna fails for any of the databases.
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    indexed = []
    for fasta_fp in ref_fp:
        # index is written next to working_dir, named after the fasta stem
        fasta_name = split(fasta_fp)[1]
        index_fp = join(working_dir, splitext(fasta_name)[0])
        logger.debug('processing file %s into location %s' % (fasta_fp, index_fp))
        cmd = ['indexdb_rna', '--ref', '%s,%s' %
               (fasta_fp, index_fp), '--tmpdir', working_dir]
        cmd_out, cmd_err, exit_code = _system_call(cmd)
        if exit_code != 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (fasta_fp, index_fp))
            logger.debug('stdout: %s' % cmd_out)
            logger.debug('stderr: %s' % cmd_err)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % fasta_fp)
        logger.debug('file %s indexed' % fasta_fp)
        indexed.append(index_fp)
    return indexed
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Remove samples with fewer than ``minreads`` total reads from a biom table.

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    # boolean-mask indexing; biom returns numpy arrays for sums/ids
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        # logger.warn is deprecated (removed in Python 3.13); use warning
        logger.warning('removed %d samples with reads per sample<%d'
                       % (len(bad_samples), minreads))
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def fasta_from_biom(table, fasta_file_name):
    '''Write the observation sequences of a biom table to a fasta file.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    # observation ids ARE the sequences, so each record uses the sequence
    # both as the fasta header and as the body
    with open(fasta_file_name, 'w') as out:
        out.writelines('>%s\n%s\n' % (seq, seq)
                       for seq in table.ids(axis='observation'))
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA.

    Splits the input table into a reference-hit table/fasta (sequences that
    aligned to the reference databases) and a reference-non-hit table/fasta,
    all written into ``biom_table_dir``.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        filepath(s) to FASTA reference database(s)
    biom_table_dir : str
        directory where the output biom tables and fasta files are written
    ref_db_fp : tuple
        filepath(s) to the SortMeRNA indexed reference database(s)
    threads : int, optional
        number of threads passed to remove_artifacts_seqs
    verbose : bool, optional
        passed through to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold; None uses remove_artifacts_seqs defaults
    coverage_thresh : float, optional
        minimal coverage threshold; None uses remove_artifacts_seqs defaults

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        # nothing survived filtering; only report the temp files for cleanup
        # NOTE(review): logger.warn is deprecated; should be logger.warning
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file; headers equal sequences for biom tables
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact biom table (invert=True keeps non-hits)
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s biom table (in-place this time)
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
        (currently unused in this function; kept for interface
        compatibility with callers)
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True

    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    if stat(seqs_fp).st_size == 0:
        # Logger.warn is a deprecated alias of Logger.warning
        logger.warning('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # thresholds below are expressed as percentages (0-100) to match the
    # blast output fields produced by sortmerna
    if coverage_thresh is None:
        coverage_thresh = 0.95 * 100 if negate else 0.5 * 100
    if sim_thresh is None:
        sim_thresh = 0.95 * 100 if negate else 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []
        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # subject id '*' means the read had no match
                if line[1] == '*':
                    continue
                # keep reads whose % identity [2], coverage [13] and
                # per-nucleotide bitscore [11] all pass the thresholds
                if (float(line[2]) >= sim_thresh) and \
                        (float(line[13]) >= coverage_thresh) and \
                        (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Align the sequences of a FASTA file with MAFFT.

    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads

    Returns
    -------
    msa_fp : str
        name of output alignment file or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # mafft denotes "all threads" by -1 rather than 0
    num_threads = -1 if threads == 0 else threads
    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    out_fp = seqs_fp + '.msa'
    cmd = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
           '--thread', str(num_threads), seqs_fp]
    _, stderr, ret = _system_call(cmd, stdoutfilename=out_fp)
    if ret != 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % stderr)
        return None
    return out_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory in which the chimera-free output file is written
        (named <basename of seqs_fp>.no_chimeras)
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    output_fp
        the chimera removed fasta file name
        (returned even if vsearch failed; the error is only logged)
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))
    output_fp = join(
        working_dir, "%s.no_chimeras" % basename(seqs_fp))
    # we use the parameters dn=0.000001, xn=1000, minh=10000000
    # so 1 mismatch in the A/B region will cancel it being labeled as chimera
    # and ~3 unique reads in each region will make it a chimera if
    # no mismatches
    params = ['vsearch', '--uchime_denovo', seqs_fp,
              '--nonchimeras', output_fp,
              '-dn', '0.000001', '-xn', '1000',
              '-minh', '10000000', '--mindiffs', '5',
              '--fasta_width', '0', '--threads', str(threads)]
    sout, serr, res = _system_call(params)
    if not res == 0:
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % sout)
        logger.debug('stderr : %s' % serr)
    return output_fp
def sample_id_from_read_id(readid):
    """Extract the sample id from a split_libraries_fastq.py read header.

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # header layout: "<sampleid>_<readnum> <rest of header>"
    token = readid.split(' ')[0]
    # the sample id itself may contain underscores, so split off only
    # the last '_'-separated field
    return token.rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Split a demultiplexed FASTA file into one file per sample.

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))
    outputs = {}
    try:
        for header, seq in sequence_generator(seqs):
            sample = sample_id_from_read_id(header)
            if sample not in outputs:
                outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')
            outputs[sample].write(">%s\n%s\n" % (header, seq))
    finally:
        # close every per-sample handle even if parsing raised mid-loop;
        # previously a failure here leaked all open file handles
        for out_f in outputs.values():
            out_f.close()
    logger.info('split to %d files' % len(outputs))
def write_biom_table(table, biom_fp):
    """Serialize a BIOM table to an HDF5 file.

    Parameters
    ----------
    table: biom.table
        an instance of a BIOM table
    biom_fp: string
        filepath to output BIOM table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    # biom_open manages the HDF5 file handle lifecycle
    with biom_open(biom_fp, 'w') as out_f:
        table.to_hdf5(h5grp=out_f, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """Collect the deblurred fasta files to merge into the output table.

    Parameters:
    -----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the ending of all the fasta files to be added to the table
        (default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')

    Returns
    -------
    names : list of tuples of (string,string)
        list of tuples of:
            name of fasta files to be added to the biom table
            sampleid (file names without the file_end and path)
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))
    matches = []
    for path in glob(join(input_dir, "*%s" % file_end)):
        if isfile(path):
            # drop the deblur suffix, then any remaining extension
            # (e.g. '.fasta') to obtain the sample id
            stem = basename(path)[:-len(file_end)]
            matches.append((path, os.path.splitext(stem)[0]))
    logger.debug('found %d files' % len(matches))
    return matches
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string so '\w' is not an invalid string escape)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}
    seqlist = []
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # interpolate the sample id into the warning; previously the
            # literal placeholder 'sample %s already in table!' was emitted
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file (each sequence doubles as its own header)
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def start_log(level=logging.DEBUG, filename=None):
    """Configure the run-wide log file for deblur.

    Parameters
    ----------
    level : int, optional
        logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
    filename : str, optional
        name of the filename to save the log to or
        None (default) to use deblur.log.TIMESTAMP
    """
    if filename is None:
        # timestamped default name, with ' ' and ':' replaced by '.'
        stamp = time.ctime().replace(' ', '.').replace(':', '.')
        filename = 'deblur.log.%s' % stamp
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    logger = logging.getLogger(__name__)
    logger.info('*************************')
    logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
    """Execute the command `cmd` and capture its output.

    Parameters
    ----------
    cmd : str
        The string containing the command to be run.
    stdoutfilename : str
        Name of the file to save stdout to or None
        (default) to not save to file

    Returns
    -------
    tuple of (str, str, int)
        The standard output, standard error and exit status of the
        executed command

    Notes
    -----
    This function is ported and modified from QIIME
    (http://www.qiime.org), previously named
    qiime_system_call. QIIME is a GPL project, but we obtained permission
    from the authors of this function to port it to Qiita and keep it
    under BSD license.
    """
    logger = logging.getLogger(__name__)
    logger.debug('system call: %s' % cmd)
    popen_kwargs = {'universal_newlines': True, 'shell': False,
                    'stderr': subprocess.PIPE}
    if stdoutfilename:
        # the child process inherits its own duplicate of the descriptor,
        # so the parent's handle may be closed right after spawning
        with open(stdoutfilename, 'w') as out_fh:
            proc = subprocess.Popen(cmd, stdout=out_fh, **popen_kwargs)
    else:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, **popen_kwargs)
    # communicate() drains the PIPEs and blocks until the command is done
    stdout, stderr = proc.communicate()
    return stdout, stderr, proc.returncode
|
biocore/deblur | deblur/workflow.py | start_log | python | def start_log(level=logging.DEBUG, filename=None):
if filename is None:
tstr = time.ctime()
tstr = tstr.replace(' ', '.')
tstr = tstr.replace(':', '.')
filename = 'deblur.log.%s' % tstr
logging.basicConfig(filename=filename, level=level,
format='%(levelname)s(%(thread)d)'
'%(asctime)s:%(message)s')
logger = logging.getLogger(__name__)
logger.info('*************************')
logger.info('deblurring started') | start the logger for the run
Parameters
----------
level : int, optional
logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
filename : str, optional
name of the filename to save the log to or
None (default) to use deblur.log.TIMESTAMP | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L898-L919 | null | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
# scikit-bio format sniffers resolved once at import time; element [0] of a
# sniffer's return value indicates whether the file matches the format
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Detect which FASTQ quality-encoding variant *input_fp* uses.

    Tries each known variant in turn and returns the first one under which
    the file's first record parses.
    http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters

    Raises
    ------
    ValueError
        If no known variant can interpret the PHRED scores.
    """
    for candidate in ('illumina1.8', 'illumina1.3', 'solexa', 'sanger'):
        try:
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            # this variant cannot parse the file -- try the next one
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) pairs from a FASTA or FASTQ input.

    Parameters
    ----------
    input_fp : filepath
        A filepath, which can be any valid fasta or fastq file within the
        limitations of scikit-bio's IO registry.

    Notes
    -----
    The use of this method is a stopgap to replicate the existing
    `parse_fasta` functionality while at the same time allowing for fastq
    support.

    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.

    Returns
    -------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    read_kwargs = {}
    if sniff_fasta(input_fp)[0]:
        fmt = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        fmt = 'fastq'
        read_kwargs['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty;
        # warn the caller and yield nothing
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        logger.warn(msg)
        warnings.warn(msg, UserWarning)
        return
    # some callers hand us a StringIO; rewind it after the sniffing above
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for rec in skbio.read(input_fp, format=fmt, **read_kwargs):
        yield (rec.metadata['id'], str(rec))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim (label, sequence) records to a fixed length.

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this
        trim.

    Returns
    -------
    Generator of (str, str)
        The trimmed sequences in (label, sequence) format

    Raises
    ------
    ValueError
        If trim_len < -1 (raised lazily, on first iteration, since this is
        a generator).
    """
    logger = logging.getLogger(__name__)
    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)
    # counters for the number of kept and total sequences
    kept = 0
    total = 0
    for label, seq in input_seqs:
        total += 1
        if trim_len == -1:
            # trimming disabled entirely (left trim included)
            kept += 1
            yield label, seq
        elif len(seq) >= trim_len:
            kept += 1
            yield label, seq[left_trim_len:trim_len]
        # sequences shorter than trim_len are silently dropped
    if kept < 0.01 * total:
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (total - kept, total, trim_len)
        logger.warn(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, kept, total))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Collapse identical FASTA sequences with VSEARCH, dropping rare ones.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller
        than integer
    use_log: boolean, optional
        save the vsearch logfile as well (to output_fp.log)
        default=False
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)
    cmd = ['vsearch', '--derep_fulllength', seqs_fp,
           '--output', output_fp, '--sizeout',
           '--fasta_width', '0', '--minuniquesize', str(min_size),
           '--quiet', '--threads', str(threads)]
    if use_log:
        cmd += ['--log', "%s.log" % output_fp]
    sout, serr, res = _system_call(cmd)
    if res != 0:
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % cmd)
        logger.debug('stdout: %s' % sout)
        logger.debug('stderr: %s' % serr)
        return
def build_index_sortmerna(ref_fp, working_dir):
    """Build a SortMeRNA index for every reference database.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed database

    Returns
    -------
    all_db: list
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        If indexdb_rna fails on any of the databases.
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    indexed = []
    for db in ref_fp:
        # index files are named after the fasta file, minus its extension
        db_output = join(working_dir, splitext(split(db)[1])[0])
        logger.debug('processing file %s into location %s' % (db, db_output))
        cmd = ['indexdb_rna', '--ref', '%s,%s' % (db, db_output),
               '--tmpdir', working_dir]
        sout, serr, res = _system_call(cmd)
        if res != 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (db, db_output))
            logger.debug('stdout: %s' % sout)
            logger.debug('stderr: %s' % serr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % db)
        logger.debug('file %s indexed' % db)
        indexed.append(db_output)
    return indexed
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Filter samples from biom table that have less than
    minreads reads total.

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    samp_sum = table.sum(axis='sample')
    samp_ids = table.ids(axis='sample')
    bad_samples = samp_ids[samp_sum < minreads]
    if len(bad_samples) > 0:
        # Logger.warn is a deprecated alias of Logger.warning
        logger.warning('removed %d samples with reads per sample<%d'
                       % (len(bad_samples), minreads))
        table = table.filter(bad_samples, axis='sample',
                             inplace=inplace, invert=True)
    else:
        logger.debug('all samples contain > %d reads' % minreads)
    return table
def fasta_from_biom(table, fasta_file_name):
    '''Write every observation id of a biom table as a FASTA record.

    Each sequence doubles as its own header, so the file consists of
    '>seq' / 'seq' line pairs.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    with open(fasta_file_name, 'w') as out_f:
        out_f.writelines('>%s\n%s\n' % (seq, seq)
                         for seq in table.ids(axis='observation'))
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Remove artifacts from a biom table using SortMeRNA.

    Splits the input table into a 'reference-hit' biom/fasta pair
    (sequences matching at least one reference database) and a
    'reference-non-hit' pair, all written into biom_table_dir.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        file path(s) to the FASTA reference database(s)
    biom_table_dir : str
        directory into which the output biom/fasta files are written
    ref_db_fp : tuple
        file path(s) to the indexed reference database(s)
    threads : int, optional
        number of threads passed through to remove_artifacts_seqs
    verbose : bool, optional
        passed through to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold (see remove_artifacts_seqs)
    coverage_thresh : float, optional
        minimal coverage threshold (see remove_artifacts_seqs)

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        # empty input file; nothing to split, return whatever tmp files exist
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact biom table (invert=True keeps the
    # observations NOT in good_seqs)
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s biom table (in place this time)
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
        (currently unused in this function; kept for interface
        compatibility with callers)
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True

    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    if stat(seqs_fp).st_size == 0:
        # Logger.warn is a deprecated alias of Logger.warning
        logger.warning('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # thresholds below are expressed as percentages (0-100) to match the
    # blast output fields produced by sortmerna
    if coverage_thresh is None:
        coverage_thresh = 0.95 * 100 if negate else 0.5 * 100
    if sim_thresh is None:
        sim_thresh = 0.95 * 100 if negate else 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            return output_fp, 0, []
        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # subject id '*' means the read had no match
                if line[1] == '*':
                    continue
                # keep reads whose % identity [2], coverage [13] and
                # per-nucleotide bitscore [11] all pass the thresholds
                if (float(line[2]) >= sim_thresh) and \
                        (float(line[13]) >= coverage_thresh) and \
                        (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Align the sequences of a FASTA file with MAFFT.

    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads

    Returns
    -------
    msa_fp : str
        name of output alignment file or None if error encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    # mafft denotes "all threads" by -1 rather than 0
    num_threads = -1 if threads == 0 else threads
    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    out_fp = seqs_fp + '.msa'
    cmd = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
           '--thread', str(num_threads), seqs_fp]
    _, stderr, ret = _system_call(cmd, stdoutfilename=out_fp)
    if ret != 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % stderr)
        return None
    return out_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory in which the chimera-free output file is written
        (named <basename of seqs_fp>.no_chimeras)
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    output_fp
        the chimera removed fasta file name
        (returned even if vsearch failed; the error is only logged)
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))
    output_fp = join(
        working_dir, "%s.no_chimeras" % basename(seqs_fp))
    # we use the parameters dn=0.000001, xn=1000, minh=10000000
    # so 1 mismatch in the A/B region will cancel it being labeled as chimera
    # and ~3 unique reads in each region will make it a chimera if
    # no mismatches
    params = ['vsearch', '--uchime_denovo', seqs_fp,
              '--nonchimeras', output_fp,
              '-dn', '0.000001', '-xn', '1000',
              '-minh', '10000000', '--mindiffs', '5',
              '--fasta_width', '0', '--threads', str(threads)]
    sout, serr, res = _system_call(params)
    if not res == 0:
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % sout)
        logger.debug('stderr : %s' % serr)
    return output_fp
def sample_id_from_read_id(readid):
    """Extract the sample id from a split_libraries_fastq.py read header.

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # header layout: "<sampleid>_<readnum> <rest of header>"
    token = readid.split(' ')[0]
    # the sample id itself may contain underscores, so split off only
    # the last '_'-separated field
    return token.rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Split a demultiplexed FASTA file into one file per sample.

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))
    outputs = {}
    try:
        for header, seq in sequence_generator(seqs):
            sample = sample_id_from_read_id(header)
            if sample not in outputs:
                outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')
            outputs[sample].write(">%s\n%s\n" % (header, seq))
    finally:
        # close every per-sample handle even if parsing raised mid-loop;
        # previously a failure here leaked all open file handles
        for out_f in outputs.values():
            out_f.close()
    logger.info('split to %d files' % len(outputs))
def write_biom_table(table, biom_fp):
    """Serialize a BIOM table to an HDF5 file.

    Parameters
    ----------
    table: biom.table
        an instance of a BIOM table
    biom_fp: string
        filepath to output BIOM table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    # biom_open manages the HDF5 file handle lifecycle
    with biom_open(biom_fp, 'w') as out_f:
        table.to_hdf5(h5grp=out_f, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """Collect the deblurred fasta files to merge into the output table.

    Parameters:
    -----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the ending of all the fasta files to be added to the table
        (default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')

    Returns
    -------
    names : list of tuples of (string,string)
        list of tuples of:
            name of fasta files to be added to the biom table
            sampleid (file names without the file_end and path)
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))
    matches = []
    for path in glob(join(input_dir, "*%s" % file_end)):
        if isfile(path):
            # drop the deblur suffix, then any remaining extension
            # (e.g. '.fasta') to obtain the sample id
            stem = basename(path)[:-len(file_end)]
            matches.append((path, os.path.splitext(stem)[0]))
    logger.debug('found %d files' % len(matches))
    return matches
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string so '\w' reaches the regex engine rather than being
    # treated as a deprecated string escape)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}
    seqlist = []
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # interpolate the sample id into the message; previously the
            # bare format string 'sample %s already in table!' was emitted
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file.

    Runs, in order: length trimming, dereplication (vsearch), artifact
    removal (SortMeRNA with negate=True), multiple sequence alignment
    (mafft, skipped when only one unique sequence remains), deblurring
    and de novo chimera removal (vsearch). All intermediate files are
    written into working_dir.

    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for deblurring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length (-1 disables trimming, see trim_seqs)
    left_trim_length: integer
        number of positions to trim from the 5' end (0 disables)
    min_size: integer
        lower limit on sequence abundance; dereplicated sequences with
        fewer reads are discarded
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults of remove_artifacts_seqs
        (NOTE(review): this docstring used to say 0.3 for negate=False,
        but remove_artifacts_seqs defaults to 0.5 — confirm intent)

    Return
    ------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras, or None if an error was
        encountered at any step
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)
    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        # multiple_sequence_alignment writes output_artif_fp + '.msa',
        # which is the same path as output_msa_fp computed here
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            logger.warn('no sequences returned from deblur for file %s' %
                        output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exist status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
|
biocore/deblur | deblur/workflow.py | _system_call | python | def _system_call(cmd, stdoutfilename=None):
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value | Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exist status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license. | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L922-L965 | null | # ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
# scikit-bio sniffers used by sequence_generator to detect whether an
# input file/handle is FASTA or FASTQ; each returns (matched, kwargs)
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
    """Detect the PHRED encoding variant of a FASTQ file.

    Tries each scikit-bio fastq variant in turn and returns the first
    one that can decode the file; see
    http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters

    Parameters
    ----------
    input_fp : filepath
        the fastq file to inspect

    Returns
    -------
    str
        name of the detected variant

    Raises
    ------
    ValueError
        if none of the known variants can interpret the PHRED scores
    """
    for candidate in ['illumina1.8', 'illumina1.3', 'solexa', 'sanger']:
        try:
            next(skbio.read(input_fp, format='fastq', variant=candidate))
        except Exception:
            continue
        return candidate
    raise ValueError("Unknown variant, unable to interpret PHRED")
def sequence_generator(input_fp):
    """Yield (id, sequence) from an input file

    Parameters
    ----------
    input_fp : filepath or file handle
        A filepath (or text handle), which can be any valid fasta or
        fastq file within the limitations of scikit-bio's IO registry.

    Notes
    -----
    The use of this method is a stopgap to replicate the existing `parse_fasta`
    functionality while at the same time allowing for fastq support.

    Raises
    ------
    skbio.io.FormatIdentificationWarning
        If the format of the input file cannot be determined.

    Yields
    ------
    (str, str)
        The ID and sequence.
    """
    logger = logging.getLogger(__name__)
    kw = {}
    # sniffers return (matched, kwargs); index 0 is the match flag
    if sniff_fasta(input_fp)[0]:
        format = 'fasta'
    elif sniff_fastq(input_fp)[0]:
        format = 'fastq'
        # fastq additionally needs the PHRED offset variant to decode scores
        kw['variant'] = _get_fastq_variant(input_fp)
    else:
        # usually happens when the fasta file is empty
        # so need to return no sequences (and warn)
        msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
        logger.warn(msg)
        warnings.warn(msg, UserWarning)
        return
    # some of the test code is using file paths, some is using StringIO.
    # rewind handles, since the sniffers above may have consumed them
    if isinstance(input_fp, io.TextIOBase):
        input_fp.seek(0)
    for record in skbio.read(input_fp, format=format, **kw):
        yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
    """Trim FASTA sequences to specified length.

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The input sequences in (label, sequence) format
    trim_len : int
        Sequence trimming length. Specify a value of -1 to disable trimming.
    left_trim_len : int
        Sequence trimming from the 5' end. A value of 0 will disable this trim.

    Yields
    ------
    (str, str)
        The trimmed sequences in (label, sequence) format

    Raises
    ------
    ValueError
        If trim_len is smaller than -1 (raised lazily, on first iteration)
    """
    logger = logging.getLogger(__name__)
    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)
    # counters for the number of kept and total sequences
    kept = 0
    seen = 0
    for label, seq in input_seqs:
        seen += 1
        if trim_len == -1:
            # trimming disabled: the read passes through untouched
            kept += 1
            yield label, seq
        elif len(seq) >= trim_len:
            kept += 1
            yield label, seq[left_trim_len:trim_len]
    if kept < 0.01 * seen:
        # fewer than 1% of reads survived - the trim length is suspicious
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (seen - kept, seen, trim_len)
        logger.warn(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)'
                     % (trim_len, kept, seen))
def dereplicate_seqs(seqs_fp,
                     output_fp,
                     min_size=2,
                     use_log=False,
                     threads=1):
    """Dereplicate FASTA sequences and remove singletons using VSEARCH.

    Parameters
    ----------
    seqs_fp : string
        filepath to FASTA sequence file
    output_fp : string
        file path to dereplicated sequences (FASTA format)
    min_size : integer, optional
        discard sequences with an abundance value smaller
        than integer
    use_log: boolean, optional
        when True, also save the vsearch logfile (to output_fp.log)
    threads : int, optional
        number of threads to use (0 for all available)
    """
    logger = logging.getLogger(__name__)
    logger.info('dereplicate seqs file %s' % seqs_fp)
    cmd = ['vsearch', '--derep_fulllength', seqs_fp,
           '--output', output_fp, '--sizeout',
           '--fasta_width', '0', '--minuniquesize', str(min_size),
           '--quiet', '--threads', str(threads)]
    if use_log:
        cmd.extend(['--log', "%s.log" % output_fp])
    sout, serr, res = _system_call(cmd)
    if res != 0:
        # vsearch failed; log the details but do not raise
        logger.error('Problem running vsearch dereplication on file %s' %
                     seqs_fp)
        logger.debug('parameters used:\n%s' % cmd)
        logger.debug('stdout: %s' % sout)
        logger.debug('stderr: %s' % serr)
        return
def build_index_sortmerna(ref_fp, working_dir):
    """Build a SortMeRNA index for all reference databases.

    Parameters
    ----------
    ref_fp: tuple
        filepaths to FASTA reference databases
    working_dir: string
        working directory path where to store the indexed databases

    Returns
    -------
    all_db: list
        filepaths to SortMeRNA indexed reference databases

    Raises
    ------
    RuntimeError
        if indexdb_rna fails for any of the databases
    """
    logger = logging.getLogger(__name__)
    logger.info('build_index_sortmerna files %s to'
                ' dir %s' % (ref_fp, working_dir))
    indexed = []
    for ref_file in ref_fp:
        # index basename is the fasta file name without path or extension
        fasta_name = split(ref_file)[1]
        db_output = join(working_dir, splitext(fasta_name)[0])
        logger.debug('processing file %s into location %s'
                     % (ref_file, db_output))
        cmd = ['indexdb_rna', '--ref', '%s,%s' %
               (ref_file, db_output), '--tmpdir', working_dir]
        sout, serr, res = _system_call(cmd)
        if res != 0:
            logger.error('Problem running indexdb_rna on file %s to dir %s. '
                         'database not indexed' % (ref_file, db_output))
            logger.debug('stdout: %s' % sout)
            logger.debug('stderr: %s' % serr)
            logger.critical('execution halted')
            raise RuntimeError('Cannot index database file %s' % ref_file)
        logger.debug('file %s indexed' % ref_file)
        indexed.append(db_output)
    return indexed
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
    """Remove samples with fewer than minreads total reads from a biom table.

    Parameters
    ----------
    table : biom.Table
        the biom table to filter
    minreads : int (optional)
        the minimal number of reads in a sample in order to keep it
    inplace : bool (optional)
        if True, filter the biom table in place, if false create a new copy

    Returns
    -------
    table : biom.Table
        the filtered biom table
    """
    logger = logging.getLogger(__name__)
    logger.debug('filter_minreads_started. minreads=%d' % minreads)
    per_sample_totals = table.sum(axis='sample')
    low_samples = table.ids(axis='sample')[per_sample_totals < minreads]
    if len(low_samples) == 0:
        logger.debug('all samples contain > %d reads' % minreads)
        return table
    logger.warn('removed %d samples with reads per sample<%d'
                % (len(low_samples), minreads))
    # invert=True keeps everything except the low-read samples
    return table.filter(low_samples, axis='sample',
                        inplace=inplace, invert=True)
def fasta_from_biom(table, fasta_file_name):
    '''Save sequences from a biom table to a fasta file.

    Each observation id is used both as the fasta header and as the
    sequence itself.

    Parameters
    ----------
    table : biom.Table
        The biom table containing the sequences
    fasta_file_name : str
        Name of the fasta output file
    '''
    logger = logging.getLogger(__name__)
    logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
    with open(fasta_file_name, 'w') as out_f:
        out_f.writelines('>%s\n%s\n' % (seq, seq)
                         for seq in table.ids(axis='observation'))
    logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
                                     fasta_filename,
                                     ref_fp,
                                     biom_table_dir,
                                     ref_db_fp,
                                     threads=1,
                                     verbose=False,
                                     sim_thresh=None,
                                     coverage_thresh=None):
    """Split a biom table into reference-hit and reference-non-hit parts.

    Runs SortMeRNA (via remove_artifacts_seqs) on the table's sequences,
    then writes reference-hit.biom / reference-hit.seqs.fa and
    reference-non-hit.biom / reference-non-hit.seqs.fa into
    biom_table_dir, dropping samples left with 0 reads.

    Parameters
    ----------
    table_filename : str
        name of the biom table file
    fasta_filename : str
        the fasta file containing all the sequences of the biom table
    ref_fp : tuple
        filepath(s) to the FASTA reference database(s)
    biom_table_dir : str
        directory into which the output biom/fasta files are written
    ref_db_fp : tuple
        filepath(s) to the SortMeRNA indexed reference database(s)
    threads : int, optional
        number of threads, passed through to remove_artifacts_seqs
    verbose : bool, optional
        passed through to remove_artifacts_seqs
    sim_thresh : float, optional
        minimal similarity threshold, or None for the defaults of
        remove_artifacts_seqs
    coverage_thresh : float, optional
        minimal coverage threshold, or None for the defaults of
        remove_artifacts_seqs

    Returns
    -------
    tmp_files : list of str
        The temp files created during the artifact removal step
    """
    logger = logging.getLogger(__name__)
    logger.info('getting 16s sequences from the biom table')
    # remove artifacts from the fasta file. output is in clean_fp fasta file
    clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
                                                               working_dir=biom_table_dir,
                                                               ref_db_fp=ref_db_fp,
                                                               negate=False, threads=threads,
                                                               verbose=verbose,
                                                               sim_thresh=sim_thresh,
                                                               coverage_thresh=coverage_thresh)
    if clean_fp is None:
        logger.warn("No clean sequences in %s" % fasta_filename)
        return tmp_files
    logger.debug('removed artifacts from sequences input %s'
                 ' to output %s' % (fasta_filename, clean_fp))
    # read the clean fasta file
    good_seqs = {s for _, s in sequence_generator(clean_fp)}
    logger.debug('loaded %d sequences from cleaned biom table'
                 ' fasta file' % len(good_seqs))
    logger.debug('loading biom table %s' % table_filename)
    table = load_table(table_filename)
    # filter and save the artifact biom table
    # (invert=True keeps only the observations NOT in good_seqs)
    artifact_table = table.filter(list(good_seqs),
                                  axis='observation', inplace=False,
                                  invert=True)
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(artifact_table)
    output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
    write_biom_table(artifact_table, output_nomatch_fp)
    logger.info('wrote artifact only filtered biom table to %s'
                % output_nomatch_fp)
    # and save the reference-non-hit fasta file
    output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
    fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
    # filter and save the only 16s biom table
    table.filter(list(good_seqs), axis='observation')
    # remove the samples with 0 reads
    filter_minreads_samples_from_table(table)
    output_fp = join(biom_table_dir, 'reference-hit.biom')
    write_biom_table(table, output_fp)
    logger.info('wrote 16s filtered biom table to %s' % output_fp)
    # and save the reference-hit fasta file
    output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
    fasta_from_biom(table, output_match_fasta_fp)
    # we also don't need the cleaned fasta file
    tmp_files.append(clean_fp)
    return tmp_files
def remove_artifacts_seqs(seqs_fp,
                          ref_fp,
                          working_dir,
                          ref_db_fp,
                          negate=False,
                          threads=1,
                          verbose=False,
                          sim_thresh=None,
                          coverage_thresh=None):
    """Remove artifacts from FASTA file using SortMeRNA.

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    ref_fp: tuple
        file path(s) to FASTA database file
    working_dir: string
        working directory path
    ref_db_fp: tuple
        file path(s) to indexed FASTA database
    negate: boolean, optional
        if True, discard all input sequences aligning
        to reference database
    threads: integer, optional
        number of threads to use for SortMeRNA
    verbose: boolean, optional
        If true, output SortMeRNA errors
    sim_thresh: float, optional
        The minimal similarity threshold (between 0 and 1)
        for keeping the sequence
        if None, the default values used are 0.65 for negate=False,
        0.95 for negate=True
    coverage_thresh: float, optional
        The minimal coverage threshold (between 0 and 1)
        for alignments for keeping the sequence
        if None, the default values used are 0.5 for negate=False,
        0.95 for negate=True

    Returns
    -------
    output_fp : str
        Name of the artifact removed fasta file
    okseqs : int
        The number of sequences left after artifact removal
    tmp_files : list of str
        Names of the tmp files created
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_artifacts_seqs file %s' % seqs_fp)
    # nothing to do for an empty input file
    if stat(seqs_fp).st_size == 0:
        logger.warn('file %s has size 0, continuing' % seqs_fp)
        return None, 0, []
    # thresholds are stored internally as percentages (0-100) to match
    # the values reported in sortmerna's blast-like output columns
    if coverage_thresh is None:
        if negate:
            coverage_thresh = 0.95 * 100
        else:
            coverage_thresh = 0.5 * 100
    if sim_thresh is None:
        if negate:
            sim_thresh = 0.95 * 100
        else:
            sim_thresh = 0.65 * 100
    # the minimal average bitscore per nucleotide
    bitscore_thresh = 0.65
    output_fp = join(working_dir,
                     "%s.no_artifacts" % basename(seqs_fp))
    blast_output = join(working_dir,
                        '%s.sortmerna' % basename(seqs_fp))
    aligned_seq_ids = set()
    for i, db in enumerate(ref_fp):
        logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
                     % (db, working_dir, ref_db_fp[i], seqs_fp))
        # run SortMeRNA
        # we use -e 100 to remove E-value based filtering by sortmerna
        # since we use bitscore/identity/coverage filtering instead
        params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
                  (db, ref_db_fp[i]),
                  '--aligned', blast_output, '--blast', '3', '--best', '1',
                  '--print_all_reads', '-v', '-e', '100']
        sout, serr, res = _system_call(params)
        if not res == 0:
            logger.error('sortmerna error on file %s' % seqs_fp)
            logger.error('stdout : %s' % sout)
            logger.error('stderr : %s' % serr)
            # NOTE(review): this error path reports 0 sequences and no tmp
            # files even though a partial blast output may exist - confirm
            return output_fp, 0, []
        blast_output_filename = '%s.blast' % blast_output
        with open(blast_output_filename, 'r') as bfl:
            for line in bfl:
                line = line.strip().split('\t')
                # if * means no match
                if line[1] == '*':
                    continue
                # check if % identity[2] and coverage[13] are large enough
                # NOTE(review): len(line[0]) is the length of the query *id*
                # string, not the read sequence - presumably the bitscore
                # check was meant to use the read length; confirm upstream
                if (float(line[2]) >= sim_thresh) and \
                        (float(line[13]) >= coverage_thresh) and \
                        (float(line[11]) >= bitscore_thresh * len(line[0])):
                    aligned_seq_ids.add(line[0])
    if negate:
        def op(x): return x not in aligned_seq_ids
    else:
        def op(x): return x in aligned_seq_ids
    # if negate = False, only output sequences
    # matching to at least one of the databases
    totalseqs = 0
    okseqs = 0
    badseqs = 0
    with open(output_fp, 'w') as out_f:
        for label, seq in sequence_generator(seqs_fp):
            totalseqs += 1
            label = label.split()[0]
            if op(label):
                out_f.write(">%s\n%s\n" % (label, seq))
                okseqs += 1
            else:
                badseqs += 1
    logger.info('total sequences %d, passing sequences %d, '
                'failing sequences %d' % (totalseqs, okseqs, badseqs))
    return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
    """Perform multiple sequence alignment on FASTA file using MAFFT.

    Parameters
    ----------
    seqs_fp: string
        filepath to FASTA file for multiple sequence alignment
    threads: integer, optional
        number of threads to use. 0 to use all threads

    Returns
    -------
    msa_fp : str or None
        name of output alignment file (seqs_fp + '.msa'), or None if an
        error was encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
    if threads == 0:
        # mafft denotes "use all threads" with -1 rather than 0
        threads = -1
    if stat(seqs_fp).st_size == 0:
        logger.warning('msa failed. file %s has no reads' % seqs_fp)
        return None
    msa_fp = seqs_fp + '.msa'
    cmd = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
           '--thread', str(threads), seqs_fp]
    sout, serr, res = _system_call(cmd, stdoutfilename=msa_fp)
    if res != 0:
        logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
        logger.debug('stderr : %s' % serr)
        return None
    return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
    """Remove chimeras de novo using UCHIME (VSEARCH implementation).

    Parameters
    ----------
    seqs_fp: string
        file path to FASTA input sequence file
    working_dir: string
        directory in which to store the chimera-free output
    threads : int
        number of threads (0 for all cores)

    Returns
    -------
    output_fp : str
        the chimera removed fasta file name
    """
    logger = logging.getLogger(__name__)
    logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
                'to working dir %s' % (seqs_fp, working_dir))
    output_fp = join(
        working_dir, "%s.no_chimeras" % basename(seqs_fp))
    # parameters dn=0.000001, xn=1000, minh=10000000 mean that a single
    # mismatch in the A/B region cancels the chimera label, while ~3 unique
    # reads in each region with no mismatches will flag a chimera
    cmd = ['vsearch', '--uchime_denovo', seqs_fp,
           '--nonchimeras', output_fp,
           '-dn', '0.000001', '-xn', '1000',
           '-minh', '10000000', '--mindiffs', '5',
           '--fasta_width', '0', '--threads', str(threads)]
    sout, serr, res = _system_call(cmd)
    if res != 0:
        logger.error('problem with chimera removal for file %s' % seqs_fp)
        logger.debug('stdout : %s' % sout)
        logger.debug('stderr : %s' % serr)
    return output_fp
def sample_id_from_read_id(readid):
    """Get SampleID from the split_libraries_fastq.py output
    fasta file read header

    Parameters
    ----------
    readid : str
        the fasta file read name

    Returns
    -------
    sampleid : str
        the sample id
    """
    # header format is "<sampleid>_<readnum> <extra info>": drop the extra
    # info first, then strip the trailing read number after the last '_'
    return readid.split(' ')[0].rsplit('_', 1)[0]
def split_sequence_file_on_sample_ids_to_files(seqs,
                                               outdir):
    """Split FASTA file on sample IDs.

    Writes one per-sample fasta file (named <sampleid>.fasta) into
    outdir, routing each read by the sample id encoded in its header.

    Parameters
    ----------
    seqs: file handler
        file handler to demultiplexed FASTA file
    outdir: string
        dirpath to output split FASTA files
    """
    logger = logging.getLogger(__name__)
    logger.info('split_sequence_file_on_sample_ids_to_files'
                ' for file %s into dir %s' % (seqs, outdir))
    outputs = {}
    try:
        for bits in sequence_generator(seqs):
            sample = sample_id_from_read_id(bits[0])
            if sample not in outputs:
                outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')
            outputs[sample].write(">%s\n%s\n" % (bits[0], bits[1]))
    finally:
        # close every per-sample handle even if parsing/writing fails,
        # so partially written files are not left open on error
        for out_fh in outputs.values():
            out_fh.close()
    logger.info('split to %d files' % len(outputs))
def write_biom_table(table, biom_fp):
    """Write BIOM table to an HDF5 file on disk.

    Parameters
    ----------
    table: biom.table
        an instance of a BIOM table
    biom_fp: string
        filepath to output BIOM table
    """
    logger = logging.getLogger(__name__)
    logger.debug('write_biom_table to file %s' % biom_fp)
    with biom_open(biom_fp, 'w') as hdf5_grp:
        table.to_hdf5(h5grp=hdf5_grp, generated_by="deblur")
    logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
                        file_end='.trim.derep.no_artifacts'
                        '.msa.deblur.no_chimeras'):
    """Get a list of files to add to the output table

    Parameters:
    -----------
    input_dir : string
        name of the directory containing the deblurred fasta files
    file_end : string
        the ending shared by all the fasta files to be added to the table

    Returns
    -------
    names : list of tuples of (string,string)
        (filepath, sample id) pairs; the sample id is the file name
        without the path, the file_end suffix and the final extension
    """
    logger = logging.getLogger(__name__)
    logger.debug('get_files_for_table input dir %s, '
                 'file-ending %s' % (input_dir, file_end))

    def _sample_id(fp):
        # file name minus path, suffix and remaining extension (.fasta etc.)
        return os.path.splitext(basename(fp)[:-len(file_end)])[0]

    names = [(fp, _sample_id(fp))
             for fp in glob(join(input_dir, "*%s" % file_end))
             if isfile(fp)]
    logger.debug('found %d files' % len(names))
    return names
def create_otu_table(output_fp, deblurred_list,
                     outputfasta_fp=None, minreads=0):
    """Create a biom table out of all files in a directory

    Parameters
    ----------
    output_fp : string
        filepath to output BIOM table
    deblurred_list : list of (str, str)
        list of file names (including path), sampleid of all deblurred
        fasta files to add to the table
    outputfasta_fp : str, optional
        name of output fasta file (of all sequences in the table) or None
        to not write
    minreads : int, optional
        minimal number of reads per bacterial sequence in order to write
        it to the biom table and fasta file or 0 to write all
    """
    logger = logging.getLogger(__name__)
    logger.info('create_otu_table for %d samples, '
                'into output table %s' % (len(deblurred_list), output_fp))
    # the regexp for finding the number of reads of a sequence
    # (raw string, so '\w' is a regex class and not a string escape)
    sizeregexp = re.compile(r'(?<=size=)\w+')
    seqdict = {}
    seqlist = []
    sampset = set()
    samplist = []
    # arbitrary size for the sparse results matrix so we won't run out of space
    obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
    # load the sequences from all samples into a sparse matrix
    sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
    for (cfilename, csampleid) in deblurred_list:
        if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
            csampleid = csampleid.rsplit('.', 1)[0]
        # test if sample has already been processed
        if csampleid in sampset:
            # interpolate the sample id into the warning; the bare format
            # string 'sample %s already in table!' was previously emitted
            warnings.warn('sample %s already in table!' % csampleid,
                          UserWarning)
            logger.error('sample %s already in table!' % csampleid)
            continue
        sampset.add(csampleid)
        samplist.append(csampleid)
        csampidx = len(sampset)-1
        # read the fasta file and add to the matrix
        for chead, cseq in sequence_generator(cfilename):
            cseq = cseq.upper()
            if cseq not in seqdict:
                seqdict[cseq] = len(seqlist)
                seqlist.append(cseq)
            cseqidx = seqdict[cseq]
            cfreq = float(sizeregexp.search(chead).group(0))
            try:
                obs[cseqidx, csampidx] += cfreq
            except IndexError:
                # exception means we ran out of space - add more OTUs
                shape = obs.shape
                obs.resize((shape[0]*2, shape[1]))
                obs[cseqidx, csampidx] = cfreq
    logger.info('for output biom table loaded %d samples, %d unique sequences'
                % (len(samplist), len(seqlist)))
    # and now make the sparse matrix the real size
    obs.resize((len(seqlist), len(samplist)))
    # do the minimal reads per otu filtering
    if minreads > 0:
        readsperotu = obs.sum(axis=1)
        keep = np.where(readsperotu >= minreads)[0]
        logger.info('keeping %d (out of %d sequences) with >=%d reads' %
                    (len(keep), len(seqlist), minreads))
        obs = obs[keep, :]
        seqlist = list(np.array(seqlist)[keep])
        logger.debug('filtering completed')
    # convert the matrix to a biom table
    table = Table(obs.tocsr(), seqlist, samplist,
                  observation_metadata=None,
                  sample_metadata=None, table_id=None,
                  generated_by="deblur",
                  create_date=datetime.now().isoformat())
    logger.debug('converted to biom table')
    # remove samples with 0 reads
    filter_minreads_samples_from_table(table)
    # save the merged otu table
    write_biom_table(table, output_fp)
    logger.info('saved to biom file %s' % output_fp)
    # and save the fasta file
    if outputfasta_fp is not None:
        logger.debug('saving fasta file')
        with open(outputfasta_fp, 'w') as f:
            for cseq in seqlist:
                f.write('>%s\n%s\n' % (cseq, cseq))
        logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch full deblur workflow for a single post split-libraries fasta file

    Parameters
    ----------
    seqs_fp: string
        a post split library fasta file for debluring
    working_dir: string
        working directory path
    mean_error: float
        mean error for original sequence estimate
    error_dist: list
        list of error probabilities for each hamming distance
    indel_prob: float
        insertion/deletion (indel) probability
    indel_max: integer
        maximal indel number
    trim_length: integer
        sequence trim length
    left_trim_length: integer
        trim the first n reads
    min_size: integer
        upper limit on sequence abundance (discard sequences below limit)
    ref_fp: tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp: tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample: integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh: float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh: float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)

    Return
    ------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras, or None if an error was
        encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)

    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))
    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir,
                           "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp,
                     output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)
    # Step 3: Remove artifacts
    # NOTE(review): coverage_thresh is accepted by this function but never
    # forwarded to remove_artifacts_seqs -- confirm whether it should be
    # passed through here.
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
                                                              ref_fp=ref_fp,
                                                              working_dir=working_dir,
                                                              ref_db_fp=ref_db_fp,
                                                              negate=True,
                                                              threads=threads_per_sample,
                                                              sim_thresh=sim_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' %
                      seqs_fp, UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None
    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        output_msa_fp = join(working_dir,
                             "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads)
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None
    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            # was logger.warn(): deprecated alias, and inconsistent with the
            # logger.warning() calls used everywhere else in this function
            logger.warning('no sequences returned from deblur for file %s' %
                           output_msa_fp)
            return None
        for s in seqs:
            # remove '-' from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())
    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
    """Configure logging for the current deblur run.

    Parameters
    ----------
    level : int, optional
        logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
    filename : str, optional
        name of the filename to save the log to or
        None (default) to use deblur.log.TIMESTAMP
    """
    if filename is None:
        # derive a per-run filename from the current timestamp,
        # replacing characters that are awkward in file names
        stamp = time.ctime().replace(' ', '.').replace(':', '.')
        filename = 'deblur.log.%s' % stamp
    logging.basicConfig(filename=filename, level=level,
                        format='%(levelname)s(%(thread)d)'
                        '%(asctime)s:%(message)s')
    run_logger = logging.getLogger(__name__)
    run_logger.info('*************************')
    run_logger.info('deblurring started')
|
biocore/deblur | deblur/deblurring.py | get_sequences | python | def get_sequences(input_seqs):
try:
seqs = [Sequence(id, seq) for id, seq in input_seqs]
except Exception:
seqs = []
if len(seqs) == 0:
logger = logging.getLogger(__name__)
logger.warn('No sequences found in fasta file!')
return None
# Check that all the sequence lengths (aligned and unaligned are the same)
aligned_lengths = set(s.length for s in seqs)
unaligned_lengths = set(s.unaligned_length for s in seqs)
if len(aligned_lengths) != 1 or len(unaligned_lengths) != 1:
raise ValueError(
"Not all sequence have the same length. Aligned lengths: %s, "
"sequence lengths: %s"
% (", ".join(map(str, aligned_lengths)),
", ".join(map(str, unaligned_lengths))))
seqs = sorted(seqs, key=attrgetter('frequency'), reverse=True)
return seqs | Returns a list of Sequences
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format
Returns
-------
list of Sequence
Raises
------
ValueError
If no sequences where found in `input_seqs`
If all the sequences do not have the same length either aligned or
unaligned. | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/deblurring.py#L27-L68 | null | # -----------------------------------------------------------------------------
# Copyright (c) 2013, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from operator import attrgetter
import numpy as np
import logging
from deblur.sequence import Sequence
def get_default_error_profile():
    """Return the default error profile for deblurring
    based on illumina run data
    """
    # index i = maximal expected read fraction at hamming distance i
    return [1, 0.06, 0.02, 0.02, 0.01,
            0.005, 0.005, 0.005, 0.001, 0.001,
            0.001, 0.0005]
def deblur(input_seqs, mean_error=0.005,
           error_dist=None,
           indel_prob=0.01, indel_max=3):
    """Deblur the reads

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format. The label
        should include the sequence count in the 'size=X' format.
    mean_error : float, optional
        The mean illumina error, used for original sequence estimate.
        Default: 0.005
    error_dist : list of float, optional
        A list of error probabilities. The length of the list determines the
        amount of hamming distances taken into account. Default: None, use
        the default error profile (from get_default_error_profile() )
    indel_prob : float, optional
        Indel probability (same for N indels). Default: 0.01
    indel_max : int, optional
        The maximal number of indels expected by errors. Default: 3

    Returns
    -------
    list of Sequence
        The deblurred sequences, or None if no sequences could be read.

    Notes
    -----
    mean_error is used only for normalizing the peak height before deblurring.
    The array 'error_dist' represents the error distribution, where
    Xi = max frequency of error hamming. The length of this array - 1 limits
    the hamming distance taken into account, i.e. if the length of `error_dist`
    is 10, sequences up to 10 - 1 = 9 hamming distance will be taken into
    account
    """
    logger = logging.getLogger(__name__)
    if error_dist is None:
        error_dist = get_default_error_profile()
    logger.debug('Using error profile %s' % error_dist)

    # Get the sequences
    seqs = get_sequences(input_seqs)
    if seqs is None:
        # was logger.warn(): deprecated alias of warning()
        logger.warning('no sequences deblurred')
        return None

    logger.info('deblurring %d sequences' % len(seqs))
    # fix the original frequencies of each read error using the
    # mean error profile
    mod_factor = pow((1 - mean_error), seqs[0].unaligned_length)
    error_dist = np.array(error_dist) / mod_factor
    max_h_dist = len(error_dist) - 1

    for seq_i in seqs:
        # no need to remove neighbors if freq. is <=0
        if seq_i.frequency <= 0:
            continue
        # Correct for the fact that many reads are expected to be mutated
        num_err = error_dist * seq_i.frequency
        # if it's low level, just continue
        if num_err[1] < 0.1:
            continue
        # Compare to all other sequences and calculate hamming dist
        seq_i_len = len(seq_i.sequence.rstrip('-'))
        for seq_j in seqs:
            # Ignore current sequence
            if seq_i == seq_j:
                continue
            # Calculate the hamming distance
            h_dist = np.count_nonzero(np.not_equal(seq_i.np_sequence,
                                                   seq_j.np_sequence))
            # If far away, don't need to correct
            if h_dist > max_h_dist:
                continue
            # Close, so lets calculate exact distance
            # We stop checking in the shortest sequence after removing trailing
            # indels. We need to do this in order to avoid double counting
            # the insertions/deletions
            length = min(seq_i_len, len(seq_j.sequence.rstrip('-')))
            sub_seq_i = seq_i.np_sequence[:length]
            sub_seq_j = seq_j.np_sequence[:length]
            mask = (sub_seq_i != sub_seq_j)
            # find all indels (value 4 encodes '-' in np_sequence)
            mut_is_indel = np.logical_or(sub_seq_i[mask] == 4,
                                         sub_seq_j[mask] == 4)
            num_indels = mut_is_indel.sum()
            if num_indels > 0:
                # need to account for indel in one sequence not solved in the other
                # (so we have '-' at the end. Need to ignore it in the total count)
                h_dist = np.count_nonzero(np.not_equal(seq_i.np_sequence[:length],
                                                       seq_j.np_sequence[:length]))
            num_substitutions = h_dist - num_indels
            correction_value = num_err[num_substitutions]
            if num_indels > indel_max:
                correction_value = 0
            elif num_indels > 0:
                # remove errors due to (PCR?) indels (saw in 22 mock mixture)
                correction_value = correction_value * indel_prob
            # met all the criteria - so correct the frequency of the neighbor
            seq_j.frequency -= correction_value

    result = [s for s in seqs if round(s.frequency) > 0]
    logger.info('%d unique sequences left following deblurring' % len(result))
    return result
|
biocore/deblur | deblur/deblurring.py | deblur | python | def deblur(input_seqs, mean_error=0.005,
error_dist=None,
indel_prob=0.01, indel_max=3):
logger = logging.getLogger(__name__)
if error_dist is None:
error_dist = get_default_error_profile()
logger.debug('Using error profile %s' % error_dist)
# Get the sequences
seqs = get_sequences(input_seqs)
if seqs is None:
logger.warn('no sequences deblurred')
return None
logger.info('deblurring %d sequences' % len(seqs))
# fix the original frequencies of each read error using the
# mean error profile
mod_factor = pow((1 - mean_error), seqs[0].unaligned_length)
error_dist = np.array(error_dist) / mod_factor
max_h_dist = len(error_dist) - 1
for seq_i in seqs:
# no need to remove neighbors if freq. is <=0
if seq_i.frequency <= 0:
continue
# Correct for the fact that many reads are expected to be mutated
num_err = error_dist * seq_i.frequency
# if it's low level, just continue
if num_err[1] < 0.1:
continue
# Compare to all other sequences and calculate hamming dist
seq_i_len = len(seq_i.sequence.rstrip('-'))
for seq_j in seqs:
# Ignore current sequence
if seq_i == seq_j:
continue
# Calculate the hamming distance
h_dist = np.count_nonzero(np.not_equal(seq_i.np_sequence,
seq_j.np_sequence))
# If far away, don't need to correct
if h_dist > max_h_dist:
continue
# Close, so lets calculate exact distance
# We stop checking in the shortest sequence after removing trailing
# indels. We need to do this in order to avoid double counting
# the insertions/deletions
length = min(seq_i_len, len(seq_j.sequence.rstrip('-')))
sub_seq_i = seq_i.np_sequence[:length]
sub_seq_j = seq_j.np_sequence[:length]
mask = (sub_seq_i != sub_seq_j)
# find all indels
mut_is_indel = np.logical_or(sub_seq_i[mask] == 4,
sub_seq_j[mask] == 4)
num_indels = mut_is_indel.sum()
if num_indels > 0:
# need to account for indel in one sequence not solved in the other
# (so we have '-' at the end. Need to ignore it in the total count)
h_dist = np.count_nonzero(np.not_equal(seq_i.np_sequence[:length],
seq_j.np_sequence[:length]))
num_substitutions = h_dist - num_indels
correction_value = num_err[num_substitutions]
if num_indels > indel_max:
correction_value = 0
elif num_indels > 0:
# remove errors due to (PCR?) indels (saw in 22 mock mixture)
correction_value = correction_value * indel_prob
# met all the criteria - so correct the frequency of the neighbor
seq_j.frequency -= correction_value
result = [s for s in seqs if round(s.frequency) > 0]
logger.info('%d unique sequences left following deblurring' % len(result))
return result | Deblur the reads
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format. The label
should include the sequence count in the 'size=X' format.
mean_error : float, optional
The mean illumina error, used for original sequence estimate.
Default: 0.005
error_dist : list of float, optional
A list of error probabilities. The length of the list determines the
amount of hamming distances taken into account. Default: None, use
the default error profile (from get_default_error_profile() )
indel_prob : float, optional
Indel probability (same for N indels). Default: 0.01
indel_max : int, optional
The maximal number of indels expected by errors. Default: 3
Results
-------
list of Sequence
The deblurred sequences
Notes
-----
mean_error is used only for normalizing the peak height before deblurring.
The array 'error_dist' represents the error distribution, where
Xi = max frequency of error hamming. The length of this array - 1 limits
the hamming distance taken into account, i.e. if the length if `error_dist`
is 10, sequences up to 10 - 1 = 9 hamming distance will be taken into
account | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/deblurring.py#L71-L189 | [
"def get_default_error_profile():\n \"\"\"Return the default error profile for deblurring\n based on illumina run data\n \"\"\"\n error_dist = [1, 0.06, 0.02, 0.02, 0.01,\n 0.005, 0.005, 0.005, 0.001, 0.001,\n 0.001, 0.0005]\n return error_dist\n",
"def get_sequenc... | # -----------------------------------------------------------------------------
# Copyright (c) 2013, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from operator import attrgetter
import numpy as np
import logging
from deblur.sequence import Sequence
def get_default_error_profile():
    """Return the default error profile for deblurring
    based on illumina run data

    Index i holds the maximal fraction of reads expected at hamming
    distance i from an original sequence; the list length bounds the
    neighborhood considered during deblurring.
    """
    error_dist = [1, 0.06, 0.02, 0.02, 0.01,
                  0.005, 0.005, 0.005, 0.001, 0.001,
                  0.001, 0.0005]
    return error_dist
def get_sequences(input_seqs):
    """Returns a list of Sequences

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format

    Returns
    -------
    list of Sequence
        Sorted by decreasing frequency, or None if no sequences were found
        (or any record failed to parse).

    Raises
    ------
    ValueError
        If all the sequences do not have the same length either aligned or
        unaligned.
    """
    try:
        seqs = [Sequence(id, seq) for id, seq in input_seqs]
    except Exception:
        # any malformed record (e.g. a label without a size= field) voids
        # the whole input; handled the same as an empty file below
        seqs = []
    if len(seqs) == 0:
        logger = logging.getLogger(__name__)
        # was logger.warn(): deprecated alias of warning()
        logger.warning('No sequences found in fasta file!')
        return None

    # Check that all the sequence lengths (aligned and unaligned are the same)
    aligned_lengths = set(s.length for s in seqs)
    unaligned_lengths = set(s.unaligned_length for s in seqs)
    if len(aligned_lengths) != 1 or len(unaligned_lengths) != 1:
        raise ValueError(
            "Not all sequence have the same length. Aligned lengths: %s, "
            "sequence lengths: %s"
            % (", ".join(map(str, aligned_lengths)),
               ", ".join(map(str, unaligned_lengths))))
    # most abundant sequences first
    seqs = sorted(seqs, key=attrgetter('frequency'), reverse=True)
    return seqs
|
biocore/deblur | deblur/sequence.py | Sequence.to_fasta | python | def to_fasta(self):
prefix, suffix = re.split('(?<=size=)\w+', self.label, maxsplit=1)
new_count = int(round(self.frequency))
new_label = "%s%d%s" % (prefix, new_count, suffix)
return ">%s\n%s\n" % (new_label, self.sequence) | Returns a string with the sequence in fasta format
Returns
-------
str
The FASTA representation of the sequence | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/sequence.py#L58-L69 | null | class Sequence(object):
"""Sequence object to represent the aligned reads
Attributes
----------
label : str
The sequence label
sequence : str
The sequence string
length : int
The sequence length (aligned)
unaligned_length : int
The unaligned sequence length
frequency : float
The number of times the sequence have been seen in the dataset
np_sequence : numpy array of int8
An int8 numpy representation of the sequence string
Methods
-------
to_fasta
"""
def __init__(self, label, sequence):
self.label = label
self.sequence = sequence.upper()
self.length = len(self.sequence)
self.unaligned_length = self.length - self.sequence.count('-')
self.frequency = float(re.search('(?<=size=)\w+', self.label).group(0))
self.np_sequence = np.array(
[trans_dict[b] for b in self.sequence], dtype=np.int8)
def __eq__(self, other):
return (type(self) == type(other) and
self.sequence == other.sequence and
self.frequency == other.frequency)
def __ne__(self, other):
return not self.__eq__(other)
|
sashs/filebytes | filebytes/mach_o.py | MachO.isSupportedContent | python | def isSupportedContent(cls, fileContent):
magic = bytearray(fileContent)[:4]
return magic == p('>I', 0xfeedface) or magic == p('>I', 0xfeedfacf) or magic == p('<I', 0xfeedface) or magic == p('<I', 0xfeedfacf) | Returns if the files are valid for this filetype | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/mach_o.py#L476-L479 | null | class MachO(Binary):
def __init__(self, fileName, fileContent=None):
super(MachO, self).__init__(fileName, fileContent)
self.__classes = self._getSuitableClasses(self._bytes)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__machHeader = self._parseMachHeader(self._bytes)
self.__loadCommands = self._parseLoadCommands(self._bytes, self.machHeader)
@property
def _classes(self):
return self.__classes
@property
def machHeader(self):
return self.__machHeader
@property
def loadCommands(self):
return self.__loadCommands
@property
def entryPoint(self):
return 0x0
@property
def imageBase(self):
for loadCommand in self.loadCommands:
if loadCommand.header.cmd == LC.SEGMENT or loadCommand.header.cmd == LC.SEGMENT_64:
for section in loadCommand.sections:
if section.header.flags & S_ATTR.SOME_INSTRUCTIONS or section.header.flags & S_ATTR.PURE_INSTRUCTIONS:
return section.header.addr - section.header.offset
return 0x0
@property
def type(self):
return 'MachO'
    def _getSuitableClasses(self, data):
        """Return the ctypes struct set matching the binary's word size.

        Byte 7 of the file is inspected: 0 selects the 32-bit structs
        (LSB_32), 1 the 64-bit structs (LSB_64); anything else yields
        None (rejected by the caller as a bad architecture).
        NOTE(review): byte 7 appears to be the top byte of cputype,
        where the 64-bit ABI flag lands for little-endian files --
        confirm behaviour for big-endian Mach-O input.
        """
        classes = None
        if data[7] == 0:
            classes = LSB_32
        elif data[7] == 1:
            classes = LSB_64
        return classes
    def _parseMachHeader(self, data):
        """Parse and validate the Mach-O header.

        Accepts the four valid magics (32/64-bit x little/big-endian);
        raises BinaryError for anything else.
        """
        header = self._classes.MachHeader.from_buffer(data)
        if header.magic not in (0xfeedface, 0xfeedfacf, 0xcefaedfe, 0xcffaedfe):
            raise BinaryError('No valid MachO file')
        return MachHeaderData(header=header)
def _parseLoadCommands(self, data, machHeader):
offset = sizeof(self._classes.MachHeader)
load_commands = []
for i in range(machHeader.header.ncmds):
command = LoadCommand.from_buffer(data, offset)
raw = (c_ubyte * command.cmdsize).from_buffer(data, offset)
if command.cmd == LC.SEGMENT or command.cmd == LC.SEGMENT_64:
command = self.__parseSegmentCommand(data, offset, raw)
elif command.cmd == LC.UUID:
command = self.__parseUuidCommand(data, offset, raw)
elif command.cmd == LC.TWOLEVEL_HINTS:
command = self.__parseTwoLevelHintCommand(data, offset, raw)
elif command.cmd in (LC.ID_DYLIB, LC.LOAD_DYLIB, LC.LOAD_WEAK_DYLIB):
command = self.__parseDylibCommand(data, offset, raw)
elif command.cmd in (LC.ID_DYLINKER, LC.LOAD_DYLINKER):
command = self.__parseDylibCommand(data, offset, raw)
else:
command = LoadCommandData(header=command)
load_commands.append(command)
offset += command.header.cmdsize
return load_commands
def __parseSegmentCommand(self, data, offset, raw):
sc = self._classes.SegmentCommand.from_buffer(data, offset)
sections = self.__parseSections(data, sc, offset+sizeof(self._classes.SegmentCommand))
return LoadCommandData(header=sc, name=sc.segname.decode('ASCII'), sections=sections, bytes=bytearray(raw), raw=raw)
def __parseUuidCommand(self, data, offset, raw):
uc = UuidCommand.from_buffer(data, offset)
return LoadCommandData(header=uc, uuid=hexlify(uc.uuid), bytes=bytearray(raw), raw=raw)
def __parseTwoLevelHintCommand(self, data, offset, raw):
tlhc = TwoLevelHintsCommand.from_buffer(data, offset)
hints = self.__parseTwoLevelHints(data, tlhc)
return LoadCommandData(header=tlhc, twoLevelHints=hints, bytes=bytearray(raw), raw=raw)
def __parseTwoLevelHints(self, data, twoLevelHintCommand):
offset = twoLevelHintCommand.offset
hints = []
for i in twoLevelHintCommand.nhints:
tlh = TwoLevelHint.from_buffer(data, offset)
hints.append(TwoLevelHintData(header=tlh))
return hints
def __parseDylibCommand(self, data, offset, raw):
dc = DylibCommand.from_buffer(data, offset)
name = get_str(raw, dc.dylib.name.offset)
return LoadCommandData(header=dc, bytes=bytearray(raw), raw=raw, name=name)
def __parseDylinkerCommand(self, data, offset, raw):
dc = DylinkerCommand.from_buffer(data, offset)
name = get_str(raw, dc.name.offset)
return LoadCommandData(header=dc, bytes=bytearray(raw), raw=raw, name=name)
def __parseSections(self, data, segment, offset):
sections = []
for i in range(segment.nsects):
sec = self._classes.Section.from_buffer(data, offset)
if self._classes.Section == LSB_64_Section:
offset += 80
else:
offset += sizeof(self._classes.Section)
raw = (c_ubyte * sec.size).from_buffer(data, sec.offset)
sections.append(SectionData(header=sec, name=sec.sectname.decode('ASCII'),bytes=bytearray(raw), raw=raw))
return sections
@classmethod
|
sashs/filebytes | filebytes/oat.py | OAT._parseOatHeader | python | def _parseOatHeader(self, data):
header = OatHeader.from_buffer(data)
if header.magic != b'oat\n':
raise BinaryError('No valid OAT file')
key_value_store_bytes = (c_ubyte * header.keyValueStoreSize).from_buffer(data, sizeof(OatHeader))
key_value_store = self.__parseKeyValueStore(key_value_store_bytes)
return OatHeaderData(header=header, keyValueStoreRaw=key_value_store_bytes, keyValueStore=key_value_store) | Returns the OatHeader | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/oat.py#L169-L178 | null | class OAT(ELF):
def __init__(self, fileName, fileContent=None):
super(OAT, self).__init__(fileName, fileContent)
self._oatBytes = self._getOatBytes(self._bytes)
self.__oatHeader = self._parseOatHeader(self._oatBytes)
self.__oatDexHeader = self._parseOatDexHeader(self._oatBytes, self.oatHeader)
@property
def oatHeader(self):
return self.__oatHeader
@property
def oatDexHeader(self):
return self.__oatDexHeader
def _getOatBytes(self, data):
rodata_sec = None
text_sec = None
for section in self.sections:
if section.name == '.rodata':
rodata_sec = section
elif section.name == '.text':
text_sec = section
oat_size = (rodata_sec.header.sh_size + text_sec.header.sh_size)
return (c_ubyte * oat_size).from_buffer(data, rodata_sec.header.sh_offset)
def __parseKeyValueStore(self, data):
"""Returns a dictionary filled with the keys and values of the key value store"""
offset = 0
key_value_store = {}
while offset != len(data):
key = get_str(data, offset)
offset += len(key)+1
value = get_str(data, offset)
offset += len(value)+1
key_value_store[key] = value
return key_value_store
def _parseOatDexHeader(self, data, oatHeader):
oat_dex_files = []
offset = sizeof(OatHeader) + oatHeader.header.keyValueStoreSize
for i in range(oatHeader.header.dexFileCount):
size = c_uint.from_buffer(data, offset).value
oat_dex_file_header_struct = create_oat_dex_file_class(size)
odfh = oat_dex_file_header_struct.from_buffer(data, offset)
offset += sizeof(oat_dex_file_header_struct)
dex_file = DexHeader.from_buffer(data, odfh.dexFileOffset)
dex_raw = (c_ubyte*dex_file.fileSize).from_buffer(data, odfh.dexFileOffset)
class_offsets = None
oat_classes = None
if dex_file.classDefsSize > 0:
class_offsets = (c_uint*dex_file.classDefsSize).from_buffer(data, offset)
oat_classes = self._parseOatClasses(data, class_offsets)
offset += dex_file.classDefsSize*4
oat_dex_files.append((OatDexFileHeaderData(header=odfh, classOffsets=class_offsets, name=odfh.dexFileLocation.decode('ASCII'), dexHeader=dex_file, dexRaw=dex_raw, dexBytes=bytearray(dex_raw), oatClasses=oat_classes)))
return oat_dex_files
def _parseOatClasses(self, data, classOffsets):
oat_classes = []
for class_offset in classOffsets:
oat_class = OatClass.from_buffer(data, class_offset)
if oat_class.type != OatClassType.kOatClassNoneCompiled:
if oat_class.type == OatClassType.kOatClassSomeCompiled:
oat_class_with_bitmap_struct = create_oat_class_with_bitmap_class(oat_class.methodsPointer)
oat_class = oat_class_with_bitmap_struct.from_buffer(data, class_offset)
oat_classes.append(oat_class)
return oat_classes
|
sashs/filebytes | filebytes/oat.py | OAT.__parseKeyValueStore | python | def __parseKeyValueStore(self, data):
offset = 0
key_value_store = {}
while offset != len(data):
key = get_str(data, offset)
offset += len(key)+1
value = get_str(data, offset)
offset += len(value)+1
key_value_store[key] = value
return key_value_store | Returns a dictionary filled with the keys and values of the key value store | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/oat.py#L180-L193 | null | class OAT(ELF):
def __init__(self, fileName, fileContent=None):
super(OAT, self).__init__(fileName, fileContent)
self._oatBytes = self._getOatBytes(self._bytes)
self.__oatHeader = self._parseOatHeader(self._oatBytes)
self.__oatDexHeader = self._parseOatDexHeader(self._oatBytes, self.oatHeader)
@property
def oatHeader(self):
return self.__oatHeader
@property
def oatDexHeader(self):
return self.__oatDexHeader
def _getOatBytes(self, data):
rodata_sec = None
text_sec = None
for section in self.sections:
if section.name == '.rodata':
rodata_sec = section
elif section.name == '.text':
text_sec = section
oat_size = (rodata_sec.header.sh_size + text_sec.header.sh_size)
return (c_ubyte * oat_size).from_buffer(data, rodata_sec.header.sh_offset)
def _parseOatHeader(self, data):
"""Returns the OatHeader"""
header = OatHeader.from_buffer(data)
if header.magic != b'oat\n':
raise BinaryError('No valid OAT file')
key_value_store_bytes = (c_ubyte * header.keyValueStoreSize).from_buffer(data, sizeof(OatHeader))
key_value_store = self.__parseKeyValueStore(key_value_store_bytes)
return OatHeaderData(header=header, keyValueStoreRaw=key_value_store_bytes, keyValueStore=key_value_store)
def _parseOatDexHeader(self, data, oatHeader):
oat_dex_files = []
offset = sizeof(OatHeader) + oatHeader.header.keyValueStoreSize
for i in range(oatHeader.header.dexFileCount):
size = c_uint.from_buffer(data, offset).value
oat_dex_file_header_struct = create_oat_dex_file_class(size)
odfh = oat_dex_file_header_struct.from_buffer(data, offset)
offset += sizeof(oat_dex_file_header_struct)
dex_file = DexHeader.from_buffer(data, odfh.dexFileOffset)
dex_raw = (c_ubyte*dex_file.fileSize).from_buffer(data, odfh.dexFileOffset)
class_offsets = None
oat_classes = None
if dex_file.classDefsSize > 0:
class_offsets = (c_uint*dex_file.classDefsSize).from_buffer(data, offset)
oat_classes = self._parseOatClasses(data, class_offsets)
offset += dex_file.classDefsSize*4
oat_dex_files.append((OatDexFileHeaderData(header=odfh, classOffsets=class_offsets, name=odfh.dexFileLocation.decode('ASCII'), dexHeader=dex_file, dexRaw=dex_raw, dexBytes=bytearray(dex_raw), oatClasses=oat_classes)))
return oat_dex_files
def _parseOatClasses(self, data, classOffsets):
oat_classes = []
for class_offset in classOffsets:
oat_class = OatClass.from_buffer(data, class_offset)
if oat_class.type != OatClassType.kOatClassNoneCompiled:
if oat_class.type == OatClassType.kOatClassSomeCompiled:
oat_class_with_bitmap_struct = create_oat_class_with_bitmap_class(oat_class.methodsPointer)
oat_class = oat_class_with_bitmap_struct.from_buffer(data, class_offset)
oat_classes.append(oat_class)
return oat_classes
|
sashs/filebytes | filebytes/pe.py | to_raw_address | python | def to_raw_address(addr, section):
return addr - section.header.VirtualAddress + section.header.PointerToRawData | Converts the addr from a rva to a pointer to raw data in the file | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L356-L358 | null | # coding=utf-8
# Copyright 2018 Sascha Schirra
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .enum import Enum
from .binary import *
###################### PE General #################
class IMAGE_FILE_MACHINE(Enum):
    """Target machine (CPU) values for IMAGE_FILE_HEADER.Machine.

    Values follow the IMAGE_FILE_MACHINE_* constants of the PE/COFF
    specification.  NOTE(review): 'UKNOWN' is a typo for UNKNOWN, kept
    as-is because external callers may reference it by name.
    """
    UKNOWN = 0
    AM33 = 0x1d3
    AMD64 = 0x8664
    ARM = 0x1c0
    ARMV = 0x1c4
    EBC = 0xebc
    I386 = 0x14c
    IA64 = 0x200
    M32R = 0x9041
    MIPS16 = 0x266
    MIPSFPU = 0x366
    MIPSFPU16 = 0x466
    POWERPC = 0x1f0
    POWERPCFP = 0x1f1
    THUMB = 0x1c2
    WCEMIPSV2 = 0x169
class IMAGE_SCN(Enum):
    """Section characteristic flags (IMAGE_SECTION_HEADER.Characteristics).

    Values follow the IMAGE_SCN_* constants of the Microsoft PE/COFF
    specification (winnt.h).
    """
    TYPE_NO_PAD = 0x00000008
    CNT_CODE = 0x00000020
    CNT_INITIALIZED_DATA = 0x00000040
    CNT_UNINITIALIZED_DATA = 0x00000080
    LNK_OTHER = 0x00000100
    LNK_INFO = 0x00000200
    LNK_REMOVE = 0x00000800
    LNK_COMDAT = 0x00001000
    GPREL = 0x00008000
    MEM_PURGEABLE = 0x00020000
    MEM_LOCKED = 0x00040000
    MEM_PRELOAD = 0x00080000
    ALIGN_1BYTES = 0x00100000
    ALIGN_2BYTES = 0x00200000
    ALIGN_4BYTES = 0x00300000
    ALIGN_8BYTES = 0x00400000
    ALIGN_16BYTES = 0x00500000
    ALIGN_32BYTES = 0x00600000
    ALIGN_64BYTES = 0x00700000
    ALIGN_128BYTES = 0x00800000
    ALIGN_256BYTES = 0x00900000
    ALIGN_512BYTES = 0x00A00000
    ALIGN_1024BYTES = 0x00B00000
    ALIGN_2048BYTES = 0x00C00000
    ALIGN_4096BYTES = 0x00D00000
    ALIGN_8192BYTES = 0x00E00000
    LNK_NRELOC_OVFL = 0x01000000
    # memory flags, completed per the PE/COFF spec
    MEM_DISCARDABLE = 0x02000000
    MEM_NOT_CACHED = 0x04000000
    MEM_NOT_PAGED = 0x08000000
    MEM_SHARED = 0x10000000
    MEM_EXECUTE = 0x20000000
    MEM_WRITE = 0x80000000
    # BUGFIX: was 0x4000000 (which is IMAGE_SCN_MEM_NOT_CACHED); the
    # spec defines IMAGE_SCN_MEM_READ as 0x40000000
    MEM_READ = 0x40000000
class ImageDllCharacteristics(Enum):
    """DLL characteristic flags (IMAGE_OPTIONAL_HEADER.DllCharacteristics),
    e.g. ASLR (DYNAMIC_BASE) and DEP (NX_COMPAT) support."""
    DYNAMIC_BASE = 0x0040
    FORCE_INTEGRITY = 0x0080
    NX_COMPAT = 0x0100
    NO_ISOLATION = 0x0200
    NO_SEH = 0x0400
    NO_BIND = 0x0800
    APP_CONTAINER = 0x1000
    WDM_DRIVER = 0x2000
    CONTROL_FLOW_GUARD = 0x4000
    TERMINAL_SERVER_AWARE = 0x8000
class ImageDirectoryEntry(Enum):
EXPORT = 0
IMPORT = 1
RESOURCE = 2
EXCEPTION = 3
SECURITY = 4
BASERELOC = 5
DEBUG = 6
COPYRIGHT = 7
GLOBALPTR = 8
TLS = 9
LOAD_CONFIG = 10
BOUND_IMPORT = 11
IAT = 12
DELAY_IMPORT = 13
COM_DESCRIPTOR = 14
NUMBER_OF_DIRECTORY_ENTRIES = 16
class IMAGE_DOS_HEADER(Structure):
    """Legacy MS-DOS header at file offset 0."""
    _fields_ = [('e_magic', c_char * 2),    # b'MZ'
                ('e_cblp', c_ushort),
                ('e_cp', c_ushort),
                ('e_crlc', c_ushort),
                ('e_cparhdr', c_ushort),
                ('e_minalloc', c_ushort),
                ('e_maxalloc', c_ushort),
                ('e_ss', c_ushort),
                ('e_sp', c_ushort),
                ('e_csum', c_ushort),
                ('e_ip', c_ushort),
                ('e_cs', c_ushort),
                ('e_lfarlc', c_ushort),
                ('e_ovno', c_ushort),
                ('e_res', c_ushort * 4),
                ('e_oemid', c_ushort),
                ('e_oeminfo', c_ushort),
                ('e_res2', c_ushort * 10),
                ('e_lfanew', c_uint)]       # file offset of the PE header
class IMAGE_FILE_HEADER(Structure):
    """COFF file header following the PE signature."""
    _fields_ = [('Machine', c_ushort),            # one of IMAGE_FILE_MACHINE
                ('NumberOfSections', c_ushort),
                ('TimeDateStamp', c_uint),
                ('PointerToSymbolTable', c_uint),
                ('NumberOfSymbols', c_uint),
                ('SizeOfOptionalHeader', c_ushort),
                ('Characteristics', c_ushort)
                ]
class IMAGE_DATA_DIRECTORY(Structure):
    """One DataDirectory entry: RVA and size of a table."""
    _fields_ = [('VirtualAddress', c_uint),
                ('Size', c_uint)]
class IMAGE_SECTION_HEADER(Structure):
    """Entry of the section table."""
    _fields_ = [('Name', c_char * 8),
                ('PhysicalAddress_or_VirtualSize', c_uint),
                ('VirtualAddress', c_uint),
                ('SizeOfRawData', c_uint),
                ('PointerToRawData', c_uint),
                ('PointerToRelocations', c_uint),
                ('PointerToLinenumbers', c_uint),
                ('NumberOfRelocations', c_ushort),
                ('NumberOfLinenumbers', c_ushort),
                ('Characteristics', c_uint)]      # IMAGE_SCN flags
class IMAGE_IMPORT_BY_NAME(Structure):
    """Hint/name entry; Name is only the first byte of a NUL-terminated string."""
    _fields_ = [('Hint', c_ushort),
                ('Name', c_char)]
class IMAGE_THUNK_DATA(Union):
    """32-bit import thunk; the four views alias the same 4 bytes."""
    _fields_ = [('ForwarderString', c_uint),
                ('Function', c_uint),
                ('Ordinal', c_uint),
                ('AddressOfData', c_uint)]
class IMAGE_IMPORT_DESCRIPTOR(Structure):
    """One entry of the import directory table (one per imported DLL)."""
    _fields_ = [('OriginalFirstThunk', c_uint),   # RVA of the import name table
                ('TimeDateStamp', c_uint),
                ('ForwarderChain', c_uint),
                ('Name', c_uint),                 # RVA of the DLL name
                ('FirstThunk', c_uint)]           # RVA of the import address table
class IMAGE_EXPORT_DIRECTORY(Structure):
    """Header of the export directory."""
    _fields_ = [('Characteristics',c_uint),
                ('TimeDateStamp',c_uint),
                ('MajorVersion', c_ushort),
                ('MinorVersion', c_ushort),
                ('Name',c_uint),                  # RVA of the DLL name
                ('Base',c_uint),                  # ordinal base
                ('NumberOfFunctions',c_uint),
                ('NumberOfNames',c_uint),
                ('AddressOfFunctions',c_uint),
                ('AddressOfNames',c_uint),
                ('AddressOfNameOrdinals',c_uint)
                ]
class GUARD_CFF_ENTRY(Structure):
    """Control Flow Guard function-table entry (RVA + metadata byte).
    NOTE(review): on-disk stride is 5 bytes; sizeof() of this Structure is
    padded, which is why callers advance the offset manually by 5.
    """
    _fields_ = [('rva',c_uint),
                ('flag', c_byte)]
##################### PE32 ########################
class IMAGE_OPTIONAL_HEADER(Structure):
    """Optional header for PE32 (32-bit) images."""
    # NOTE(review): linker versions use signed c_byte here, while the PE32+
    # variant below uses c_ubyte — presumably both should be unsigned; confirm.
    _fields_ = [('Magic', c_ushort),
                ('MajorLinkerVersion', c_byte),
                ('MinorLinkerVersion', c_byte),
                ('SizeOfCode', c_uint),
                ('SizeOfInitializedData', c_uint),
                ('SizeOfUninitializedData', c_uint),
                ('AddressOfEntryPoint', c_uint),
                ('BaseOfCode', c_uint),
                ('BaseOfData', c_uint),          # absent in PE32+
                ('ImageBase', c_uint),
                ('SectionAlignment', c_uint),
                ('FileAlignment', c_uint),
                ('MajorOperatingSystemVersion', c_ushort),
                ('MinorOperatingSystemVersion', c_ushort),
                ('MajorImageVersion', c_ushort),
                ('MinorImageVersion', c_ushort),
                ('MajorSubsystemVersion', c_ushort),
                ('MinorSubsystemVersion', c_ushort),
                ('Win32VersionValue', c_uint),
                ('SizeOfImage', c_uint),
                ('SizeOfHeaders', c_uint),
                ('CheckSum', c_uint),
                ('Subsystem', c_ushort),
                ('DllCharacteristics', c_ushort),
                ('SizeOfStackReserve', c_uint),
                ('SizeOfStackCommit', c_uint),
                ('SizeOfHeapReserve', c_uint),
                ('SizeOfHeapCommit', c_uint),
                ('LoaderFlags', c_uint),
                ('NumberOfRvaAndSizes', c_uint),
                ('DataDirectory', IMAGE_DATA_DIRECTORY * 16)]
class PE32_IMAGE_NT_HEADERS(Structure):
    """NT headers for PE32: signature + file header + 32-bit optional header."""
    _fields_ = [('Signature', c_char * 4),
                ('FileHeader', IMAGE_FILE_HEADER),
                ('OptionalHeader', IMAGE_OPTIONAL_HEADER)]
class PE32(object):
    """Bundle of structure classes used when parsing 32-bit (I386) files."""
    IMAGE_NT_HEADERS = PE32_IMAGE_NT_HEADERS
class IMAGE_LOAD_CONFIG_DIRECTORY32(Structure):
    """32-bit load configuration directory (incl. SafeSEH and CF Guard fields)."""
    _fields_ = [('Size', c_uint),
                ('TimeDateStamp', c_uint),
                ('MajorVersion', c_ushort),
                ('MinorVersion', c_ushort),
                ('GlobalFlagsClear', c_uint),
                ('GlobalFlagsSet', c_uint),
                ('CriticalSectionDefaultTimeout', c_uint),
                ('DeCommitFreeBLockThreshold', c_uint),
                ('DeCommitTotalFreeThreshold', c_uint),
                ('LockPrefixTable', c_uint),
                ('MaximumAllocationSize', c_uint),
                ('VirtualMemoryThreshold', c_uint),
                ('ProcessHeapFlags', c_uint),
                ('ProcessAffinityMask', c_uint),
                ('CSDVersion', c_ushort),
                ('Reserved1', c_ushort),
                ('EditList', c_uint),
                ('SecurityCookie', c_uint),
                ('SEHandlerTable', c_uint),
                ('SEHandlerCount', c_uint),
                ('GuardCFCheckFunctionPointer', c_uint),
                ('Reserved2', c_uint),
                ('GuardCFFunctionTable', c_uint),   # VA of the CFG function table
                ('GuardCFFunctionCount', c_uint),
                ('GuardFlags', c_uint)]
######################### PE64 ########################
class IMAGE_OPTIONAL_HEADER_PE32_PLUS(Structure):
    """Optional header for PE32+ (64-bit) images; no BaseOfData, 64-bit sizes."""
    _fields_ = [('Magic', c_ushort),
                ('MajorLinkerVersion', c_ubyte),
                ('MinorLinkerVersion', c_ubyte),
                ('SizeOfCode', c_uint),
                ('SizeOfInitializedData', c_uint),
                ('SizeOfUninitializedData', c_uint),
                ('AddressOfEntryPoint', c_uint),
                ('BaseOfCode', c_uint),
                ('ImageBase', c_ulonglong),
                ('SectionAlignment', c_uint),
                ('FileAlignment', c_uint),
                ('MajorOperatingSystemVersion', c_ushort),
                ('MinorOperatingSystemVersion', c_ushort),
                ('MajorImageVersion', c_ushort),
                ('MinorImageVersion', c_ushort),
                ('MajorSubsystemVersion', c_ushort),
                ('MinorSubsystemVersion', c_ushort),
                ('Win32VersionValue', c_uint),
                ('SizeOfImage', c_uint),
                ('SizeOfHeaders', c_uint),
                ('CheckSum', c_uint),
                ('Subsystem', c_ushort),
                ('DllCharacteristics', c_ushort),
                ('SizeOfStackReserve', c_ulonglong),
                ('SizeOfStackCommit', c_ulonglong),
                ('SizeOfHeapReserve', c_ulonglong),
                ('SizeOfHeapCommit', c_ulonglong),
                ('LoaderFlags', c_uint),
                ('NumberOfRvaAndSizes', c_uint),
                ('DataDirectory', IMAGE_DATA_DIRECTORY * 16)]
class PE64_IMAGE_NT_HEADERS(Structure):
    """NT headers for PE32+: signature + file header + 64-bit optional header."""
    _fields_ = [('Signature', c_char * 4),
                ('FileHeader', IMAGE_FILE_HEADER),
                ('OptionalHeader', IMAGE_OPTIONAL_HEADER_PE32_PLUS)]
class PE64(object):
    """Bundle of structure classes used when parsing 64-bit (AMD64) files."""
    IMAGE_NT_HEADERS = PE64_IMAGE_NT_HEADERS
class IMAGE_LOAD_CONFIG_DIRECTORY64(Structure):
    """64-bit load configuration directory; pointer-sized fields widened."""
    _fields_ = [('Size', c_uint),
                ('TimeDateStamp', c_uint),
                ('MajorVersion', c_ushort),
                ('MinorVersion', c_ushort),
                ('GlobalFlagsClear', c_uint),
                ('GlobalFlagsSet', c_uint),
                ('CriticalSectionDefaultTimeout', c_uint),
                ('DeCommitFreeBLockThreshold', c_ulonglong),
                ('DeCommitTotalFreeThreshold', c_ulonglong),
                ('LockPrefixTable', c_ulonglong),
                ('MaximumAllocationSize', c_ulonglong),
                ('VirtualMemoryThreshold', c_ulonglong),
                ('ProcessAffinityMask', c_ulonglong),   # precedes ProcessHeapFlags in the 64-bit layout
                ('ProcessHeapFlags', c_uint),
                ('CSDVersion', c_ushort),
                ('Reserved1', c_ushort),
                ('EditList', c_ulonglong),
                ('SecurityCookie', c_ulonglong),
                ('SEHandlerTable', c_ulonglong),
                ('SEHandlerCount', c_ulonglong),
                ('GuardCFCheckFunctionPointer', c_ulonglong),
                ('Reserved2', c_ulonglong),
                ('GuardCFFunctionTable', c_ulonglong),  # VA of the CFG function table
                ('GuardCFFunctionCount', c_ulonglong),
                ('GuardFlags', c_uint)]
##################### Container ###################
def to_offset(addr, section):
    """Translate the relative virtual address *addr* into a byte offset
    inside *section* (may be negative if addr lies before the section)."""
    base = section.header.VirtualAddress
    return addr - base
class ImageDosHeaderData(Container):
    """
    header = IMAGE_DOS_HEADER
    """
class ImageNtHeaderData(Container):
    """
    header = IMAGE_NT_HEADERS (PE32 or PE64 variant)
    """
class SectionData(Container):
    """
    header = IMAGE_SECTION_HEADER
    name = name of the section (str)
    bytes = bytes of section (bytearray); empty when parsed header-only
    raw = bytes of section (c_ubyte array); None when parsed header-only
    """
class DataDirectoryData(Container):
    """
    header = IMAGE_DATA_DIRECTORY
    """
class ImportDescriptorData(Container):
    """
    header = IMAGE_IMPORT_DESCRIPTOR
    dllName = name of dll (str)
    importNameTable = list of ThunkData (from OriginalFirstThunk)
    importAddressTable = list of ThunkData (from FirstThunk)
    """
class ImportByNameData(Container):
    """
    header = IMAGE_IMPORT_BY_NAME
    name = name of function (str)
    """
class ThunkData(Container):
    """
    header = IMAGE_THUNK_DATA
    rva = relative virtual address of thunk (int)
    ordinal = None | ordinal number (int) for by-ordinal imports
    importByName = None | ImportByNameData for by-name imports
    """
class ExportDirectoryData(Container):
    """
    header = IMAGE_EXPORT_DIRECTORY
    name = name of dll (str)
    functions = list of FunctionData
    """
class LoadConfigData(Container):
    """
    header = IMAGE_LOAD_CONFIG_DIRECTORY32/IMAGE_LOAD_CONFIG_DIRECTORY64
    cfGuardedFunctions = set of RVAs of CFG-allowed call/jmp targets. Empty if CFG not supported
    """
class FunctionData(Container):
    """
    name = name of the function (str)
    ordinal = ordinal (int)
    rva = relative virtual address of function (int)
    """
def checkOffset(offset, section):
    """Raise BinaryError if *offset* lies outside the section's raw data."""
    limit = len(section.raw)
    if not 0 <= offset <= limit:
        raise BinaryError('Invalid offset: {} (data size: {})'.format(offset, limit))
class PE(Binary):
    """Parser for PE/COFF binaries (EXE/DLL), both PE32 and PE32+ (64-bit).

    Parses the DOS header, NT headers and section table; unless
    parse_header_only is set, also parses the export, import and
    load-config data directories.
    """

    def __init__(self, fileName, fileContent=None, parse_header_only=False):
        super(PE, self).__init__(fileName, fileContent)
        self.__imageDosHeader = self._parseImageDosHeader(self._bytes)
        # Select the PE32 or PE64 structure bundle from the Machine field.
        self.__classes = self._getSuitableClasses(self._bytes, self.imageDosHeader)
        if not self.__classes:
            raise BinaryError('Bad architecture')
        self.__imageNtHeaders = self._parseImageNtHeaders(self._bytes, self.imageDosHeader)
        self.__sections = self._parseSections(self._bytes, self.imageDosHeader, self.imageNtHeaders, parse_header_only=parse_header_only)
        if parse_header_only:
            # Headers only: skip reading the data directories entirely.
            self.__dataDirectory = None
        else:
            self.__dataDirectory = self._parseDataDirectory(self._bytes, self.sections, self.imageNtHeaders)

    @property
    def _classes(self):
        # PE32 or PE64 structure bundle chosen in __init__.
        return self.__classes

    @property
    def imageDosHeader(self):
        # ImageDosHeaderData wrapping the parsed IMAGE_DOS_HEADER.
        return self.__imageDosHeader

    @property
    def imageNtHeaders(self):
        # ImageNtHeaderData wrapping the (PE32/PE64) IMAGE_NT_HEADERS.
        return self.__imageNtHeaders

    @property
    def sections(self):
        # List of SectionData parsed from the section table.
        return self.__sections

    @property
    def dataDirectory(self):
        # List indexed by ImageDirectoryEntry values; None if header-only parse.
        return self.__dataDirectory

    @property
    def entryPoint(self):
        """Virtual address of the entry point (ImageBase + AddressOfEntryPoint)."""
        return self.imageNtHeaders.header.OptionalHeader.ImageBase + self.imageNtHeaders.header.OptionalHeader.AddressOfEntryPoint

    @property
    def imageBase(self):
        """Preferred load address of the image."""
        return self.imageNtHeaders.header.OptionalHeader.ImageBase

    @property
    def type(self):
        """File-type tag used by the surrounding library."""
        return 'PE'

    def _getSuitableClasses(self, data, imageDosHeader):
        """Returns the class which holds the suitable structure classes
        (PE32 or PE64) for the loaded file, or None if unsupported."""
        classes = None
        # Machine is the c_ushort directly behind the 4-byte PE signature.
        machine = IMAGE_FILE_MACHINE[c_ushort.from_buffer(data, imageDosHeader.header.e_lfanew + 4).value]
        if machine == IMAGE_FILE_MACHINE.I386:
            classes = PE32
        elif machine == IMAGE_FILE_MACHINE.AMD64:
            classes = PE64
        return classes

    def _parseImageDosHeader(self, data):
        """Returns the ImageDosHeader; raises BinaryError without MZ magic."""
        ioh = IMAGE_DOS_HEADER.from_buffer(data)
        if ioh.e_magic != b'MZ':
            raise BinaryError('No valid PE/COFF file')
        return ImageDosHeaderData(header=ioh)

    def _parseImageNtHeaders(self, data, imageDosHeader):
        """Returns the ImageNtHeaders located at e_lfanew."""
        inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
        # c_char arrays drop trailing NULs, so b'PE\0\0' reads back as b'PE'.
        if inth.Signature != b'PE':
            raise BinaryError('No valid PE/COFF file')
        return ImageNtHeaderData(header=inth)

    def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
        """Parses the sections in the memory and returns a list of them."""
        sections = []
        optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
        offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader  # start reading behind the dos- and ntheaders
        image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
        for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
            ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
            if parse_header_only:
                # Header-only mode keeps the table entry but no section bytes.
                raw = None
                bytes_ = bytearray()
            else:
                size = ishdr.SizeOfRawData
                # 'raw' is a live view into 'data'; 'bytes_' is a copy.
                raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
                bytes_ = bytearray(raw)
            sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
            offset += image_section_header_size
        return sections

    def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
        """Returns the section which contains the data of the DataDirectory
        entry, or None (implicitly) if no section covers its VirtualAddress."""
        for section in sections:
            if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
                    data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData:
                return section

    def _parseDataDirectory(self, data, sections, imageNtHeaders):
        """Parses the entries of the DataDirectory and returns a list of the content."""
        # NOTE(review): only 15 slots although the directory has 16 entries;
        # the indices used below (EXPORT/IMPORT/LOAD_CONFIG) all fit.
        data_directory_data_list = [None for i in range(15)]
        # parse DataDirectory[Export]
        export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
        export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
        export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
        data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
        # parse DataDirectory[Import]
        import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
        import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
        import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
        data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
        # parse DataDirectory[LOAD_CONFIG]
        loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
        loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
        loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
        data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
        return data_directory_data_list

    def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
        """Parses the ExportDataDirectory and returns an instance of
        ExportDirectoryData, or None if there is no export section.
        (The 'data' parameter is unused; all reads go through exportSection.)"""
        if not exportSection:
            return
        functions = []
        export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
        offset = to_offset(export_directory.Name, exportSection)
        checkOffset(offset, exportSection)
        name = get_str(exportSection.raw, offset)
        # The three parallel export arrays: names, addresses, name-ordinals.
        offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
        offsetOfAddress = to_offset(export_directory.AddressOfFunctions, exportSection)
        offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
        for i in range(export_directory.NumberOfNames):
            name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
            name_offset = to_offset(name_address, exportSection)
            checkOffset(name_offset, exportSection)
            func_name = get_str(exportSection.raw, name_offset)
            ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
            func_addr = c_uint.from_buffer(exportSection.raw, offsetOfAddress).value
            # Advance by the per-element sizes of the three arrays.
            offsetOfNames += 4
            offsetOfAddress += 4
            offsetOfNameOrdinals += 2
            functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
        return ExportDirectoryData(header=export_directory, name=name, functions=functions)

    def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
        """Parses the ImportDataDirectory and returns a list of
        ImportDescriptorData, or None if there is no import section."""
        if not importSection:
            return
        raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
        offset = 0
        import_descriptors = []
        while True:
            import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
            # The descriptor table is terminated by an all-zero entry.
            if import_descriptor.OriginalFirstThunk == 0:
                break
            else:
                nameOffset = to_offset(import_descriptor.Name, importSection)
                checkOffset(nameOffset, importSection)
                dllName = get_str(importSection.raw, nameOffset)
                import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
                import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
                import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
            offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
        return import_descriptors

    def _getSectionByRVA(self, va):
        """Returns the section whose raw data covers the given RVA, else None."""
        for section in self.sections:
            address = section.header.VirtualAddress
            SizeOfRawData = section.header.SizeOfRawData
            if address <= va and va < (address + SizeOfRawData):
                return section
        return

    def _parseLoadConfig(self, loadConfigEntry, loadconfigSection):
        """Parses the load-config directory (incl. the Control Flow Guard
        function table) and returns LoadConfigData, or None without a section."""
        if not loadconfigSection:
            return
        # __init__ guarantees _classes is PE32 or PE64, so one branch is taken.
        if self._classes == PE64:
            load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY64.from_buffer(
                loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
            pass
        elif self._classes == PE32:
            load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY32.from_buffer(
                loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
            pass
        else:
            pass
        # GuardCFFunctionTable is a VA; subtract the image base to get an RVA.
        guardCFTableRVA = load_config_directory.GuardCFFunctionTable - self.imageBase
        section = self._getSectionByRVA(guardCFTableRVA)
        CfGuardedFunctions = set()
        if section:
            sectionOffset = guardCFTableRVA - section.header.VirtualAddress
            # loop through the ControlFlow Guard Function table
            for i in range(0, load_config_directory.GuardCFFunctionCount):
                cffEntry = GUARD_CFF_ENTRY.from_buffer(section.raw, sectionOffset)
                CfGuardedFunctions.add(cffEntry.rva)
                # On-disk entry stride is 5 bytes (4-byte RVA + 1 flag byte),
                # not sizeof(GUARD_CFF_ENTRY), which would include padding.
                sectionOffset += 5
        return LoadConfigData(header=load_config_directory, cfGuardedFunctions=CfGuardedFunctions)

    def __parseThunks(self, thunkRVA, importSection):
        """Parses the thunk table starting at thunkRVA and returns a list
        of ThunkData. The table ends at an all-zero thunk."""
        offset = to_offset(thunkRVA, importSection)
        table_offset = 0
        thunks = []
        while True:
            thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
            offset += sizeof(IMAGE_THUNK_DATA)
            if thunk.Ordinal == 0:
                break
            thunkData = ThunkData(header=thunk, rva=table_offset + thunkRVA, ordinal=None, importByName=None)
            # Only resolve the hint/name entry if it plausibly lies in range.
            if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
                self.__parseThunkData(thunkData, importSection)
            thunks.append(thunkData)
            # NOTE(review): 4-byte stride assumes 32-bit thunks; 64-bit import
            # tables use 8-byte thunks — confirm intended scope.
            table_offset += 4
        return thunks

    def __parseThunkData(self, thunk, importSection):
        """Parses the data of a thunk and sets either .ordinal (import by
        ordinal) or .importByName (import by name) on *thunk*."""
        offset = to_offset(thunk.header.AddressOfData, importSection)
        # '&' binds tighter than '==': this tests the top nibble is exactly 0x8,
        # i.e. the 32-bit ordinal flag bit is set.
        if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
            thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
        else:
            ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
            # Name string starts 2 bytes in, right behind the Hint field.
            checkOffset(offset + 2, importSection)
            name = get_str(importSection.raw, offset + 2)
            thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name)

    @classmethod
    def isSupportedContent(cls, fileContent):
        """Returns if the files are valid for this filetype (MZ magic)."""
        return bytearray(fileContent)[:2] == b'MZ'
|
sashs/filebytes | filebytes/pe.py | PE._getSuitableClasses | python | def _getSuitableClasses(self, data, imageDosHeader):
classes = None
machine = IMAGE_FILE_MACHINE[c_ushort.from_buffer(data,imageDosHeader.header.e_lfanew+4).value]
if machine == IMAGE_FILE_MACHINE.I386:
classes = PE32
elif machine == IMAGE_FILE_MACHINE.AMD64:
classes = PE64
return classes | Returns the class which holds the suitable classes for the loaded file | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L486-L496 | null | class PE(Binary):
def __init__(self, fileName, fileContent=None, parse_header_only=False):
super(PE, self).__init__(fileName, fileContent)
self.__imageDosHeader = self._parseImageDosHeader(self._bytes)
self.__classes = self._getSuitableClasses(self._bytes, self.imageDosHeader)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__imageNtHeaders = self._parseImageNtHeaders(self._bytes, self.imageDosHeader)
self.__sections = self._parseSections(self._bytes, self.imageDosHeader, self.imageNtHeaders, parse_header_only=parse_header_only)
if parse_header_only:
self.__dataDirectory = None
else:
self.__dataDirectory = self._parseDataDirectory(self._bytes, self.sections, self.imageNtHeaders)
@property
def _classes(self):
return self.__classes
@property
def imageDosHeader(self):
return self.__imageDosHeader
@property
def imageNtHeaders(self):
return self.__imageNtHeaders
@property
def sections(self):
return self.__sections
@property
def dataDirectory(self):
return self.__dataDirectory
@property
def entryPoint(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase + self.imageNtHeaders.header.OptionalHeader.AddressOfEntryPoint
@property
def imageBase(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase
@property
def type(self):
return 'PE'
def _parseImageDosHeader(self, data):
"""Returns the ImageDosHeader"""
ioh = IMAGE_DOS_HEADER.from_buffer(data)
if ioh.e_magic != b'MZ':
raise BinaryError('No valid PE/COFF file')
return ImageDosHeaderData(header=ioh)
def _parseImageNtHeaders(self, data, imageDosHeader):
"""Returns the ImageNtHeaders"""
inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
if inth.Signature != b'PE':
raise BinaryError('No valid PE/COFF file')
return ImageNtHeaderData(header=inth)
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
"""Parses the sections in the memory and returns a list of them"""
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections
def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
"""Returns the section which contains the data of DataDirectory"""
for section in sections:
if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData :
return section
def _parseDataDirectory(self, data, sections, imageNtHeaders):
"""Parses the entries of the DataDirectory and returns a list of the content"""
data_directory_data_list = [None for i in range(15)]
# parse DataDirectory[Export]
export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
# parse DataDirectory[Import]
import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
# parse DataDirectory[LOAD_CONFIG]
loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
return data_directory_data_list
def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
"""Parses the EmportDataDirectory and returns an instance of ExportDirectoryData"""
if not exportSection:
return
functions = []
export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
offset = to_offset(export_directory.Name, exportSection)
checkOffset(offset, exportSection)
name = get_str(exportSection.raw, offset)
offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
offsetOfAddress = to_offset(export_directory.AddressOfFunctions, exportSection)
offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
for i in range(export_directory.NumberOfNames):
name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
name_offset = to_offset(name_address, exportSection)
checkOffset(name_offset, exportSection)
func_name = get_str(exportSection.raw, name_offset)
ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
func_addr = c_uint.from_buffer(exportSection.raw, offsetOfAddress).value
offsetOfNames += 4
offsetOfAddress += 4
offsetOfNameOrdinals += 2
functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
return ExportDirectoryData(header=export_directory, name=name, functions=functions)
def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
"""Parses the ImportDataDirectory and returns a list of ImportDescriptorData"""
if not importSection:
return
raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
offset = 0
import_descriptors = []
while True:
import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
if import_descriptor.OriginalFirstThunk == 0:
break
else:
nameOffset = to_offset(import_descriptor.Name, importSection)
checkOffset(nameOffset, importSection)
dllName = get_str(importSection.raw, nameOffset)
import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
return import_descriptors
def _getSectionByRVA(self, va):
for section in self.sections:
address = section.header.VirtualAddress
SizeOfRawData = section.header.SizeOfRawData
if address <= va and va < (address + SizeOfRawData):
return section
return
def _parseLoadConfig(self, loadConfigEntry, loadconfigSection):
if not loadconfigSection:
return
if self._classes == PE64:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY64.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
elif self._classes == PE32:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY32.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
else:
pass
guardCFTableRVA = load_config_directory.GuardCFFunctionTable - self.imageBase
section = self._getSectionByRVA(guardCFTableRVA)
CfGuardedFunctions = set()
if section:
sectionOffset = guardCFTableRVA - section.header.VirtualAddress
# loop through the ControlFlow Guard Function table
for i in range(0, load_config_directory.GuardCFFunctionCount):
cffEntry = GUARD_CFF_ENTRY.from_buffer(section.raw, sectionOffset)
CfGuardedFunctions.add(cffEntry.rva)
sectionOffset += 5
return LoadConfigData(header=load_config_directory, cfGuardedFunctions=CfGuardedFunctions )
def __parseThunks(self, thunkRVA, importSection):
"""Parses the thunks and returns a list"""
offset = to_offset(thunkRVA, importSection)
table_offset = 0
thunks = []
while True:
thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
offset += sizeof(IMAGE_THUNK_DATA)
if thunk.Ordinal == 0:
break
thunkData = ThunkData(header=thunk, rva=table_offset+thunkRVA,ordinal=None, importByName=None)
if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
self.__parseThunkData(thunkData, importSection)
thunks.append(thunkData)
table_offset += 4
return thunks
def __parseThunkData(self, thunk,importSection):
"""Parses the data of a thunk and sets the data"""
offset = to_offset(thunk.header.AddressOfData, importSection)
if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
else:
ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
checkOffset(offset+2, importSection)
name = get_str(importSection.raw, offset+2)
thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name)
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:2] == b'MZ'
|
sashs/filebytes | filebytes/pe.py | PE._parseImageDosHeader | python | def _parseImageDosHeader(self, data):
ioh = IMAGE_DOS_HEADER.from_buffer(data)
if ioh.e_magic != b'MZ':
raise BinaryError('No valid PE/COFF file')
return ImageDosHeaderData(header=ioh) | Returns the ImageDosHeader | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L498-L504 | null | class PE(Binary):
def __init__(self, fileName, fileContent=None, parse_header_only=False):
super(PE, self).__init__(fileName, fileContent)
self.__imageDosHeader = self._parseImageDosHeader(self._bytes)
self.__classes = self._getSuitableClasses(self._bytes, self.imageDosHeader)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__imageNtHeaders = self._parseImageNtHeaders(self._bytes, self.imageDosHeader)
self.__sections = self._parseSections(self._bytes, self.imageDosHeader, self.imageNtHeaders, parse_header_only=parse_header_only)
if parse_header_only:
self.__dataDirectory = None
else:
self.__dataDirectory = self._parseDataDirectory(self._bytes, self.sections, self.imageNtHeaders)
@property
def _classes(self):
return self.__classes
@property
def imageDosHeader(self):
return self.__imageDosHeader
@property
def imageNtHeaders(self):
return self.__imageNtHeaders
@property
def sections(self):
return self.__sections
@property
def dataDirectory(self):
return self.__dataDirectory
@property
def entryPoint(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase + self.imageNtHeaders.header.OptionalHeader.AddressOfEntryPoint
@property
def imageBase(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase
@property
def type(self):
return 'PE'
def _getSuitableClasses(self, data, imageDosHeader):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
machine = IMAGE_FILE_MACHINE[c_ushort.from_buffer(data,imageDosHeader.header.e_lfanew+4).value]
if machine == IMAGE_FILE_MACHINE.I386:
classes = PE32
elif machine == IMAGE_FILE_MACHINE.AMD64:
classes = PE64
return classes
def _parseImageNtHeaders(self, data, imageDosHeader):
"""Returns the ImageNtHeaders"""
inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
if inth.Signature != b'PE':
raise BinaryError('No valid PE/COFF file')
return ImageNtHeaderData(header=inth)
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
"""Parses the sections in the memory and returns a list of them"""
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections
def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
"""Returns the section which contains the data of DataDirectory"""
for section in sections:
if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData :
return section
def _parseDataDirectory(self, data, sections, imageNtHeaders):
"""Parses the entries of the DataDirectory and returns a list of the content"""
data_directory_data_list = [None for i in range(15)]
# parse DataDirectory[Export]
export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
# parse DataDirectory[Import]
import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
# parse DataDirectory[LOAD_CONFIG]
loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
return data_directory_data_list
def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
"""Parses the EmportDataDirectory and returns an instance of ExportDirectoryData"""
if not exportSection:
return
functions = []
export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
offset = to_offset(export_directory.Name, exportSection)
checkOffset(offset, exportSection)
name = get_str(exportSection.raw, offset)
offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
offsetOfAddress = to_offset(export_directory.AddressOfFunctions, exportSection)
offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
for i in range(export_directory.NumberOfNames):
name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
name_offset = to_offset(name_address, exportSection)
checkOffset(name_offset, exportSection)
func_name = get_str(exportSection.raw, name_offset)
ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
func_addr = c_uint.from_buffer(exportSection.raw, offsetOfAddress).value
offsetOfNames += 4
offsetOfAddress += 4
offsetOfNameOrdinals += 2
functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
return ExportDirectoryData(header=export_directory, name=name, functions=functions)
def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
"""Parses the ImportDataDirectory and returns a list of ImportDescriptorData"""
if not importSection:
return
raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
offset = 0
import_descriptors = []
while True:
import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
if import_descriptor.OriginalFirstThunk == 0:
break
else:
nameOffset = to_offset(import_descriptor.Name, importSection)
checkOffset(nameOffset, importSection)
dllName = get_str(importSection.raw, nameOffset)
import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
return import_descriptors
def _getSectionByRVA(self, va):
for section in self.sections:
address = section.header.VirtualAddress
SizeOfRawData = section.header.SizeOfRawData
if address <= va and va < (address + SizeOfRawData):
return section
return
def _parseLoadConfig(self, loadConfigEntry, loadconfigSection):
if not loadconfigSection:
return
if self._classes == PE64:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY64.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
elif self._classes == PE32:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY32.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
else:
pass
guardCFTableRVA = load_config_directory.GuardCFFunctionTable - self.imageBase
section = self._getSectionByRVA(guardCFTableRVA)
CfGuardedFunctions = set()
if section:
sectionOffset = guardCFTableRVA - section.header.VirtualAddress
# loop through the ControlFlow Guard Function table
for i in range(0, load_config_directory.GuardCFFunctionCount):
cffEntry = GUARD_CFF_ENTRY.from_buffer(section.raw, sectionOffset)
CfGuardedFunctions.add(cffEntry.rva)
sectionOffset += 5
return LoadConfigData(header=load_config_directory, cfGuardedFunctions=CfGuardedFunctions )
def __parseThunks(self, thunkRVA, importSection):
"""Parses the thunks and returns a list"""
offset = to_offset(thunkRVA, importSection)
table_offset = 0
thunks = []
while True:
thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
offset += sizeof(IMAGE_THUNK_DATA)
if thunk.Ordinal == 0:
break
thunkData = ThunkData(header=thunk, rva=table_offset+thunkRVA,ordinal=None, importByName=None)
if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
self.__parseThunkData(thunkData, importSection)
thunks.append(thunkData)
table_offset += 4
return thunks
def __parseThunkData(self, thunk,importSection):
"""Parses the data of a thunk and sets the data"""
offset = to_offset(thunk.header.AddressOfData, importSection)
if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
else:
ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
checkOffset(offset+2, importSection)
name = get_str(importSection.raw, offset+2)
thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name)
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:2] == b'MZ'
|
sashs/filebytes | filebytes/pe.py | PE._parseImageNtHeaders | python | def _parseImageNtHeaders(self, data, imageDosHeader):
inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
if inth.Signature != b'PE':
raise BinaryError('No valid PE/COFF file')
return ImageNtHeaderData(header=inth) | Returns the ImageNtHeaders | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L506-L513 | null | class PE(Binary):
def __init__(self, fileName, fileContent=None, parse_header_only=False):
super(PE, self).__init__(fileName, fileContent)
self.__imageDosHeader = self._parseImageDosHeader(self._bytes)
self.__classes = self._getSuitableClasses(self._bytes, self.imageDosHeader)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__imageNtHeaders = self._parseImageNtHeaders(self._bytes, self.imageDosHeader)
self.__sections = self._parseSections(self._bytes, self.imageDosHeader, self.imageNtHeaders, parse_header_only=parse_header_only)
if parse_header_only:
self.__dataDirectory = None
else:
self.__dataDirectory = self._parseDataDirectory(self._bytes, self.sections, self.imageNtHeaders)
@property
def _classes(self):
return self.__classes
@property
def imageDosHeader(self):
return self.__imageDosHeader
@property
def imageNtHeaders(self):
return self.__imageNtHeaders
@property
def sections(self):
return self.__sections
@property
def dataDirectory(self):
return self.__dataDirectory
@property
def entryPoint(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase + self.imageNtHeaders.header.OptionalHeader.AddressOfEntryPoint
@property
def imageBase(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase
@property
def type(self):
return 'PE'
def _getSuitableClasses(self, data, imageDosHeader):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
machine = IMAGE_FILE_MACHINE[c_ushort.from_buffer(data,imageDosHeader.header.e_lfanew+4).value]
if machine == IMAGE_FILE_MACHINE.I386:
classes = PE32
elif machine == IMAGE_FILE_MACHINE.AMD64:
classes = PE64
return classes
def _parseImageDosHeader(self, data):
"""Returns the ImageDosHeader"""
ioh = IMAGE_DOS_HEADER.from_buffer(data)
if ioh.e_magic != b'MZ':
raise BinaryError('No valid PE/COFF file')
return ImageDosHeaderData(header=ioh)
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
"""Parses the sections in the memory and returns a list of them"""
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections
def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
"""Returns the section which contains the data of DataDirectory"""
for section in sections:
if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData :
return section
def _parseDataDirectory(self, data, sections, imageNtHeaders):
"""Parses the entries of the DataDirectory and returns a list of the content"""
data_directory_data_list = [None for i in range(15)]
# parse DataDirectory[Export]
export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
# parse DataDirectory[Import]
import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
# parse DataDirectory[LOAD_CONFIG]
loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
return data_directory_data_list
def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
"""Parses the EmportDataDirectory and returns an instance of ExportDirectoryData"""
if not exportSection:
return
functions = []
export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
offset = to_offset(export_directory.Name, exportSection)
checkOffset(offset, exportSection)
name = get_str(exportSection.raw, offset)
offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
offsetOfAddress = to_offset(export_directory.AddressOfFunctions, exportSection)
offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
for i in range(export_directory.NumberOfNames):
name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
name_offset = to_offset(name_address, exportSection)
checkOffset(name_offset, exportSection)
func_name = get_str(exportSection.raw, name_offset)
ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
func_addr = c_uint.from_buffer(exportSection.raw, offsetOfAddress).value
offsetOfNames += 4
offsetOfAddress += 4
offsetOfNameOrdinals += 2
functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
return ExportDirectoryData(header=export_directory, name=name, functions=functions)
def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
"""Parses the ImportDataDirectory and returns a list of ImportDescriptorData"""
if not importSection:
return
raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
offset = 0
import_descriptors = []
while True:
import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
if import_descriptor.OriginalFirstThunk == 0:
break
else:
nameOffset = to_offset(import_descriptor.Name, importSection)
checkOffset(nameOffset, importSection)
dllName = get_str(importSection.raw, nameOffset)
import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
return import_descriptors
def _getSectionByRVA(self, va):
for section in self.sections:
address = section.header.VirtualAddress
SizeOfRawData = section.header.SizeOfRawData
if address <= va and va < (address + SizeOfRawData):
return section
return
def _parseLoadConfig(self, loadConfigEntry, loadconfigSection):
if not loadconfigSection:
return
if self._classes == PE64:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY64.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
elif self._classes == PE32:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY32.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
else:
pass
guardCFTableRVA = load_config_directory.GuardCFFunctionTable - self.imageBase
section = self._getSectionByRVA(guardCFTableRVA)
CfGuardedFunctions = set()
if section:
sectionOffset = guardCFTableRVA - section.header.VirtualAddress
# loop through the ControlFlow Guard Function table
for i in range(0, load_config_directory.GuardCFFunctionCount):
cffEntry = GUARD_CFF_ENTRY.from_buffer(section.raw, sectionOffset)
CfGuardedFunctions.add(cffEntry.rva)
sectionOffset += 5
return LoadConfigData(header=load_config_directory, cfGuardedFunctions=CfGuardedFunctions )
def __parseThunks(self, thunkRVA, importSection):
"""Parses the thunks and returns a list"""
offset = to_offset(thunkRVA, importSection)
table_offset = 0
thunks = []
while True:
thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
offset += sizeof(IMAGE_THUNK_DATA)
if thunk.Ordinal == 0:
break
thunkData = ThunkData(header=thunk, rva=table_offset+thunkRVA,ordinal=None, importByName=None)
if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
self.__parseThunkData(thunkData, importSection)
thunks.append(thunkData)
table_offset += 4
return thunks
def __parseThunkData(self, thunk,importSection):
"""Parses the data of a thunk and sets the data"""
offset = to_offset(thunk.header.AddressOfData, importSection)
if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
else:
ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
checkOffset(offset+2, importSection)
name = get_str(importSection.raw, offset+2)
thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name)
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:2] == b'MZ'
|
sashs/filebytes | filebytes/pe.py | PE._parseSections | python | def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections | Parses the sections in the memory and returns a list of them | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L515-L539 | null | class PE(Binary):
def __init__(self, fileName, fileContent=None, parse_header_only=False):
super(PE, self).__init__(fileName, fileContent)
self.__imageDosHeader = self._parseImageDosHeader(self._bytes)
self.__classes = self._getSuitableClasses(self._bytes, self.imageDosHeader)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__imageNtHeaders = self._parseImageNtHeaders(self._bytes, self.imageDosHeader)
self.__sections = self._parseSections(self._bytes, self.imageDosHeader, self.imageNtHeaders, parse_header_only=parse_header_only)
if parse_header_only:
self.__dataDirectory = None
else:
self.__dataDirectory = self._parseDataDirectory(self._bytes, self.sections, self.imageNtHeaders)
@property
def _classes(self):
return self.__classes
@property
def imageDosHeader(self):
return self.__imageDosHeader
@property
def imageNtHeaders(self):
return self.__imageNtHeaders
@property
def sections(self):
return self.__sections
@property
def dataDirectory(self):
return self.__dataDirectory
@property
def entryPoint(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase + self.imageNtHeaders.header.OptionalHeader.AddressOfEntryPoint
@property
def imageBase(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase
@property
def type(self):
return 'PE'
def _getSuitableClasses(self, data, imageDosHeader):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
machine = IMAGE_FILE_MACHINE[c_ushort.from_buffer(data,imageDosHeader.header.e_lfanew+4).value]
if machine == IMAGE_FILE_MACHINE.I386:
classes = PE32
elif machine == IMAGE_FILE_MACHINE.AMD64:
classes = PE64
return classes
def _parseImageDosHeader(self, data):
"""Returns the ImageDosHeader"""
ioh = IMAGE_DOS_HEADER.from_buffer(data)
if ioh.e_magic != b'MZ':
raise BinaryError('No valid PE/COFF file')
return ImageDosHeaderData(header=ioh)
def _parseImageNtHeaders(self, data, imageDosHeader):
"""Returns the ImageNtHeaders"""
inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
if inth.Signature != b'PE':
raise BinaryError('No valid PE/COFF file')
return ImageNtHeaderData(header=inth)
def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
"""Returns the section which contains the data of DataDirectory"""
for section in sections:
if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData :
return section
def _parseDataDirectory(self, data, sections, imageNtHeaders):
"""Parses the entries of the DataDirectory and returns a list of the content"""
data_directory_data_list = [None for i in range(15)]
# parse DataDirectory[Export]
export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
# parse DataDirectory[Import]
import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
# parse DataDirectory[LOAD_CONFIG]
loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
return data_directory_data_list
def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
"""Parses the EmportDataDirectory and returns an instance of ExportDirectoryData"""
if not exportSection:
return
functions = []
export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
offset = to_offset(export_directory.Name, exportSection)
checkOffset(offset, exportSection)
name = get_str(exportSection.raw, offset)
offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
offsetOfAddress = to_offset(export_directory.AddressOfFunctions, exportSection)
offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
for i in range(export_directory.NumberOfNames):
name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
name_offset = to_offset(name_address, exportSection)
checkOffset(name_offset, exportSection)
func_name = get_str(exportSection.raw, name_offset)
ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
func_addr = c_uint.from_buffer(exportSection.raw, offsetOfAddress).value
offsetOfNames += 4
offsetOfAddress += 4
offsetOfNameOrdinals += 2
functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
return ExportDirectoryData(header=export_directory, name=name, functions=functions)
def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
"""Parses the ImportDataDirectory and returns a list of ImportDescriptorData"""
if not importSection:
return
raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
offset = 0
import_descriptors = []
while True:
import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
if import_descriptor.OriginalFirstThunk == 0:
break
else:
nameOffset = to_offset(import_descriptor.Name, importSection)
checkOffset(nameOffset, importSection)
dllName = get_str(importSection.raw, nameOffset)
import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
return import_descriptors
def _getSectionByRVA(self, va):
for section in self.sections:
address = section.header.VirtualAddress
SizeOfRawData = section.header.SizeOfRawData
if address <= va and va < (address + SizeOfRawData):
return section
return
def _parseLoadConfig(self, loadConfigEntry, loadconfigSection):
if not loadconfigSection:
return
if self._classes == PE64:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY64.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
elif self._classes == PE32:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY32.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
else:
pass
guardCFTableRVA = load_config_directory.GuardCFFunctionTable - self.imageBase
section = self._getSectionByRVA(guardCFTableRVA)
CfGuardedFunctions = set()
if section:
sectionOffset = guardCFTableRVA - section.header.VirtualAddress
# loop through the ControlFlow Guard Function table
for i in range(0, load_config_directory.GuardCFFunctionCount):
cffEntry = GUARD_CFF_ENTRY.from_buffer(section.raw, sectionOffset)
CfGuardedFunctions.add(cffEntry.rva)
sectionOffset += 5
return LoadConfigData(header=load_config_directory, cfGuardedFunctions=CfGuardedFunctions )
def __parseThunks(self, thunkRVA, importSection):
"""Parses the thunks and returns a list"""
offset = to_offset(thunkRVA, importSection)
table_offset = 0
thunks = []
while True:
thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
offset += sizeof(IMAGE_THUNK_DATA)
if thunk.Ordinal == 0:
break
thunkData = ThunkData(header=thunk, rva=table_offset+thunkRVA,ordinal=None, importByName=None)
if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
self.__parseThunkData(thunkData, importSection)
thunks.append(thunkData)
table_offset += 4
return thunks
def __parseThunkData(self, thunk,importSection):
"""Parses the data of a thunk and sets the data"""
offset = to_offset(thunk.header.AddressOfData, importSection)
if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
else:
ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
checkOffset(offset+2, importSection)
name = get_str(importSection.raw, offset+2)
thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name)
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:2] == b'MZ'
|
sashs/filebytes | filebytes/pe.py | PE._getSectionForDataDirectoryEntry | python | def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
for section in sections:
if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData :
return section | Returns the section which contains the data of DataDirectory | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L541-L547 | null | class PE(Binary):
def __init__(self, fileName, fileContent=None, parse_header_only=False):
super(PE, self).__init__(fileName, fileContent)
self.__imageDosHeader = self._parseImageDosHeader(self._bytes)
self.__classes = self._getSuitableClasses(self._bytes, self.imageDosHeader)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__imageNtHeaders = self._parseImageNtHeaders(self._bytes, self.imageDosHeader)
self.__sections = self._parseSections(self._bytes, self.imageDosHeader, self.imageNtHeaders, parse_header_only=parse_header_only)
if parse_header_only:
self.__dataDirectory = None
else:
self.__dataDirectory = self._parseDataDirectory(self._bytes, self.sections, self.imageNtHeaders)
@property
def _classes(self):
return self.__classes
@property
def imageDosHeader(self):
return self.__imageDosHeader
@property
def imageNtHeaders(self):
return self.__imageNtHeaders
@property
def sections(self):
return self.__sections
@property
def dataDirectory(self):
return self.__dataDirectory
@property
def entryPoint(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase + self.imageNtHeaders.header.OptionalHeader.AddressOfEntryPoint
@property
def imageBase(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase
@property
def type(self):
return 'PE'
def _getSuitableClasses(self, data, imageDosHeader):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
machine = IMAGE_FILE_MACHINE[c_ushort.from_buffer(data,imageDosHeader.header.e_lfanew+4).value]
if machine == IMAGE_FILE_MACHINE.I386:
classes = PE32
elif machine == IMAGE_FILE_MACHINE.AMD64:
classes = PE64
return classes
def _parseImageDosHeader(self, data):
"""Returns the ImageDosHeader"""
ioh = IMAGE_DOS_HEADER.from_buffer(data)
if ioh.e_magic != b'MZ':
raise BinaryError('No valid PE/COFF file')
return ImageDosHeaderData(header=ioh)
def _parseImageNtHeaders(self, data, imageDosHeader):
"""Returns the ImageNtHeaders"""
inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
if inth.Signature != b'PE':
raise BinaryError('No valid PE/COFF file')
return ImageNtHeaderData(header=inth)
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
"""Parses the sections in the memory and returns a list of them"""
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections
def _parseDataDirectory(self, data, sections, imageNtHeaders):
"""Parses the entries of the DataDirectory and returns a list of the content"""
data_directory_data_list = [None for i in range(15)]
# parse DataDirectory[Export]
export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
# parse DataDirectory[Import]
import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
# parse DataDirectory[LOAD_CONFIG]
loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
return data_directory_data_list
def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
    """Parses the export data directory and returns an ExportDirectoryData instance.

    Walks the AddressOfNames table; for each exported name it resolves
    the matching name-ordinal and the function RVA. Returns None when
    the directory has no backing section.
    """
    if not exportSection:
        return
    functions = []
    export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(
        exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
    offset = to_offset(export_directory.Name, exportSection)
    checkOffset(offset, exportSection)
    name = get_str(exportSection.raw, offset)
    offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
    offsetOfFunctions = to_offset(export_directory.AddressOfFunctions, exportSection)
    offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
    for i in range(export_directory.NumberOfNames):
        name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
        name_offset = to_offset(name_address, exportSection)
        checkOffset(name_offset, exportSection)
        func_name = get_str(exportSection.raw, name_offset)
        ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
        # BUGFIX: per the PE/COFF spec the name-ordinal is the index into
        # AddressOfFunctions. The original read the i-th slot in lock-step
        # with the name table, which is wrong whenever exports have gaps
        # or the two tables are not parallel.
        func_addr = c_uint.from_buffer(
            exportSection.raw, offsetOfFunctions + 4 * ordinal).value
        offsetOfNames += 4       # AddressOfNames entries are 4-byte RVAs
        offsetOfNameOrdinals += 2  # name-ordinal entries are 2-byte indices
        functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
    return ExportDirectoryData(header=export_directory, name=name, functions=functions)
def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
    """Parses the import data directory and returns a list of ImportDescriptorData.

    Iterates the IMAGE_IMPORT_DESCRIPTOR array until the terminating
    all-zero descriptor. Returns None when the directory has no backing
    section.
    """
    if not importSection:
        return
    descriptor_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(
        importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
    descriptors = []
    pos = 0
    descriptor_size = sizeof(IMAGE_IMPORT_DESCRIPTOR)
    while True:
        descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(descriptor_bytes, pos)
        # A zero OriginalFirstThunk marks the end of the descriptor array.
        if not descriptor.OriginalFirstThunk:
            break
        name_offset = to_offset(descriptor.Name, importSection)
        checkOffset(name_offset, importSection)
        dll_name = get_str(importSection.raw, name_offset)
        name_table = self.__parseThunks(descriptor.OriginalFirstThunk, importSection)
        address_table = self.__parseThunks(descriptor.FirstThunk, importSection)
        descriptors.append(ImportDescriptorData(
            header=descriptor,
            dllName=dll_name,
            importNameTable=name_table,
            importAddressTable=address_table))
        pos += descriptor_size
    return descriptors
def _getSectionByRVA(self, va):
    """Return the section whose raw data range covers the given RVA, or None."""
    for candidate in self.sections:
        start = candidate.header.VirtualAddress
        end = start + candidate.header.SizeOfRawData
        if start <= va < end:
            return candidate
    return
def _parseLoadConfig(self, loadConfigEntry, loadconfigSection):
    """Parses the LOAD_CONFIG data directory.

    Returns a LoadConfigData holding the directory header and the set of
    Control Flow Guard protected function RVAs, or None when the
    directory has no backing section or the architecture is unknown.
    """
    if not loadconfigSection:
        return
    # Pick the architecture-specific directory layout.
    if self._classes == PE64:
        directory_type = IMAGE_LOAD_CONFIG_DIRECTORY64
    elif self._classes == PE32:
        directory_type = IMAGE_LOAD_CONFIG_DIRECTORY32
    else:
        # BUGFIX: the original fell through here with
        # load_config_directory unbound and crashed with a NameError
        # below; treat an unknown architecture like a missing directory.
        return
    load_config_directory = directory_type.from_buffer(
        loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
    guardCFTableRVA = load_config_directory.GuardCFFunctionTable - self.imageBase
    section = self._getSectionByRVA(guardCFTableRVA)
    cf_guarded_functions = set()
    if section:
        sectionOffset = guardCFTableRVA - section.header.VirtualAddress
        # Walk the Control Flow Guard function table.
        # NOTE(review): the stride of 5 assumes one metadata byte per
        # entry (4-byte RVA + 1); strictly the extra-byte count is encoded
        # in the top nibble of GuardFlags -- confirm before relying on it.
        for _ in range(load_config_directory.GuardCFFunctionCount):
            cff_entry = GUARD_CFF_ENTRY.from_buffer(section.raw, sectionOffset)
            cf_guarded_functions.add(cff_entry.rva)
            sectionOffset += 5
    return LoadConfigData(header=load_config_directory,
                          cfGuardedFunctions=cf_guarded_functions)
def __parseThunks(self, thunkRVA, importSection):
    """Parses a thunk table starting at thunkRVA and returns a list of ThunkData.

    Iteration stops at the terminating all-zero thunk. Each entry's
    import-by-ordinal / import-by-name detail is filled in by
    __parseThunkData when its AddressOfData maps into the file.
    """
    offset = to_offset(thunkRVA, importSection)
    table_offset = 0
    thunks = []
    thunk_size = sizeof(IMAGE_THUNK_DATA)
    while True:
        thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
        offset += thunk_size
        if thunk.Ordinal == 0:  # zero thunk terminates the table
            break
        thunkData = ThunkData(header=thunk, rva=table_offset + thunkRVA,
                              ordinal=None, importByName=None)
        data_offset = to_offset(thunk.AddressOfData, importSection)
        if 0 < data_offset < len(self._bytes):
            self.__parseThunkData(thunkData, importSection)
        thunks.append(thunkData)
        # BUGFIX: advance the recorded RVA by the actual thunk size instead
        # of a hard-coded 4, keeping it consistent with `offset` (which
        # already uses sizeof(IMAGE_THUNK_DATA)).
        table_offset += thunk_size
    return thunks
def __parseThunkData(self, thunk, importSection):
    """Fill in the ordinal / import-by-name detail of a single thunk."""
    address = thunk.header.AddressOfData
    offset = to_offset(address, importSection)
    if (0xf0000000 & address) == 0x80000000:
        # Import by ordinal: high bit set, low bits carry the ordinal.
        thunk.ordinal = address & 0x0fffffff
    else:
        # Import by name: AddressOfData points at an IMAGE_IMPORT_BY_NAME
        # (2-byte hint followed by a zero-terminated function name).
        ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
        checkOffset(offset + 2, importSection)
        func_name = get_str(importSection.raw, offset + 2)
        thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=func_name)
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:2] == b'MZ'
|
sashs/filebytes | filebytes/pe.py | PE._parseDataDirectory | python | def _parseDataDirectory(self, data, sections, imageNtHeaders):
data_directory_data_list = [None for i in range(15)]
# parse DataDirectory[Export]
export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
# parse DataDirectory[Import]
import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
# parse DataDirectory[LOAD_CONFIG]
loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
return data_directory_data_list | Parses the entries of the DataDirectory and returns a list of the content | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L549-L571 | [
"def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):\n \"\"\"Returns the section which contains the data of DataDirectory\"\"\"\n for section in sections:\n if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \\\n data_directory_entry.VirtualAddres... | class PE(Binary):
def __init__(self, fileName, fileContent=None, parse_header_only=False):
super(PE, self).__init__(fileName, fileContent)
self.__imageDosHeader = self._parseImageDosHeader(self._bytes)
self.__classes = self._getSuitableClasses(self._bytes, self.imageDosHeader)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__imageNtHeaders = self._parseImageNtHeaders(self._bytes, self.imageDosHeader)
self.__sections = self._parseSections(self._bytes, self.imageDosHeader, self.imageNtHeaders, parse_header_only=parse_header_only)
if parse_header_only:
self.__dataDirectory = None
else:
self.__dataDirectory = self._parseDataDirectory(self._bytes, self.sections, self.imageNtHeaders)
@property
def _classes(self):
return self.__classes
@property
def imageDosHeader(self):
return self.__imageDosHeader
@property
def imageNtHeaders(self):
return self.__imageNtHeaders
@property
def sections(self):
return self.__sections
@property
def dataDirectory(self):
return self.__dataDirectory
@property
def entryPoint(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase + self.imageNtHeaders.header.OptionalHeader.AddressOfEntryPoint
@property
def imageBase(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase
@property
def type(self):
return 'PE'
def _getSuitableClasses(self, data, imageDosHeader):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
machine = IMAGE_FILE_MACHINE[c_ushort.from_buffer(data,imageDosHeader.header.e_lfanew+4).value]
if machine == IMAGE_FILE_MACHINE.I386:
classes = PE32
elif machine == IMAGE_FILE_MACHINE.AMD64:
classes = PE64
return classes
def _parseImageDosHeader(self, data):
"""Returns the ImageDosHeader"""
ioh = IMAGE_DOS_HEADER.from_buffer(data)
if ioh.e_magic != b'MZ':
raise BinaryError('No valid PE/COFF file')
return ImageDosHeaderData(header=ioh)
def _parseImageNtHeaders(self, data, imageDosHeader):
"""Returns the ImageNtHeaders"""
inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
if inth.Signature != b'PE':
raise BinaryError('No valid PE/COFF file')
return ImageNtHeaderData(header=inth)
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
"""Parses the sections in the memory and returns a list of them"""
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections
def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
"""Returns the section which contains the data of DataDirectory"""
for section in sections:
if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData :
return section
def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
"""Parses the EmportDataDirectory and returns an instance of ExportDirectoryData"""
if not exportSection:
return
functions = []
export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
offset = to_offset(export_directory.Name, exportSection)
checkOffset(offset, exportSection)
name = get_str(exportSection.raw, offset)
offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
offsetOfAddress = to_offset(export_directory.AddressOfFunctions, exportSection)
offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
for i in range(export_directory.NumberOfNames):
name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
name_offset = to_offset(name_address, exportSection)
checkOffset(name_offset, exportSection)
func_name = get_str(exportSection.raw, name_offset)
ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
func_addr = c_uint.from_buffer(exportSection.raw, offsetOfAddress).value
offsetOfNames += 4
offsetOfAddress += 4
offsetOfNameOrdinals += 2
functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
return ExportDirectoryData(header=export_directory, name=name, functions=functions)
def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
"""Parses the ImportDataDirectory and returns a list of ImportDescriptorData"""
if not importSection:
return
raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
offset = 0
import_descriptors = []
while True:
import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
if import_descriptor.OriginalFirstThunk == 0:
break
else:
nameOffset = to_offset(import_descriptor.Name, importSection)
checkOffset(nameOffset, importSection)
dllName = get_str(importSection.raw, nameOffset)
import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
return import_descriptors
def _getSectionByRVA(self, va):
for section in self.sections:
address = section.header.VirtualAddress
SizeOfRawData = section.header.SizeOfRawData
if address <= va and va < (address + SizeOfRawData):
return section
return
def _parseLoadConfig(self, loadConfigEntry, loadconfigSection):
if not loadconfigSection:
return
if self._classes == PE64:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY64.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
elif self._classes == PE32:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY32.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
else:
pass
guardCFTableRVA = load_config_directory.GuardCFFunctionTable - self.imageBase
section = self._getSectionByRVA(guardCFTableRVA)
CfGuardedFunctions = set()
if section:
sectionOffset = guardCFTableRVA - section.header.VirtualAddress
# loop through the ControlFlow Guard Function table
for i in range(0, load_config_directory.GuardCFFunctionCount):
cffEntry = GUARD_CFF_ENTRY.from_buffer(section.raw, sectionOffset)
CfGuardedFunctions.add(cffEntry.rva)
sectionOffset += 5
return LoadConfigData(header=load_config_directory, cfGuardedFunctions=CfGuardedFunctions )
def __parseThunks(self, thunkRVA, importSection):
"""Parses the thunks and returns a list"""
offset = to_offset(thunkRVA, importSection)
table_offset = 0
thunks = []
while True:
thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
offset += sizeof(IMAGE_THUNK_DATA)
if thunk.Ordinal == 0:
break
thunkData = ThunkData(header=thunk, rva=table_offset+thunkRVA,ordinal=None, importByName=None)
if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
self.__parseThunkData(thunkData, importSection)
thunks.append(thunkData)
table_offset += 4
return thunks
def __parseThunkData(self, thunk,importSection):
"""Parses the data of a thunk and sets the data"""
offset = to_offset(thunk.header.AddressOfData, importSection)
if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
else:
ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
checkOffset(offset+2, importSection)
name = get_str(importSection.raw, offset+2)
thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name)
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:2] == b'MZ'
|
sashs/filebytes | filebytes/pe.py | PE._parseDataDirectoryExport | python | def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
if not exportSection:
return
functions = []
export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
offset = to_offset(export_directory.Name, exportSection)
checkOffset(offset, exportSection)
name = get_str(exportSection.raw, offset)
offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
offsetOfAddress = to_offset(export_directory.AddressOfFunctions, exportSection)
offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
for i in range(export_directory.NumberOfNames):
name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
name_offset = to_offset(name_address, exportSection)
checkOffset(name_offset, exportSection)
func_name = get_str(exportSection.raw, name_offset)
ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
func_addr = c_uint.from_buffer(exportSection.raw, offsetOfAddress).value
offsetOfNames += 4
offsetOfAddress += 4
offsetOfNameOrdinals += 2
functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
return ExportDirectoryData(header=export_directory, name=name, functions=functions) | Parses the EmportDataDirectory and returns an instance of ExportDirectoryData | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L573-L601 | null | class PE(Binary):
def __init__(self, fileName, fileContent=None, parse_header_only=False):
super(PE, self).__init__(fileName, fileContent)
self.__imageDosHeader = self._parseImageDosHeader(self._bytes)
self.__classes = self._getSuitableClasses(self._bytes, self.imageDosHeader)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__imageNtHeaders = self._parseImageNtHeaders(self._bytes, self.imageDosHeader)
self.__sections = self._parseSections(self._bytes, self.imageDosHeader, self.imageNtHeaders, parse_header_only=parse_header_only)
if parse_header_only:
self.__dataDirectory = None
else:
self.__dataDirectory = self._parseDataDirectory(self._bytes, self.sections, self.imageNtHeaders)
@property
def _classes(self):
return self.__classes
@property
def imageDosHeader(self):
return self.__imageDosHeader
@property
def imageNtHeaders(self):
return self.__imageNtHeaders
@property
def sections(self):
return self.__sections
@property
def dataDirectory(self):
return self.__dataDirectory
@property
def entryPoint(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase + self.imageNtHeaders.header.OptionalHeader.AddressOfEntryPoint
@property
def imageBase(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase
@property
def type(self):
return 'PE'
def _getSuitableClasses(self, data, imageDosHeader):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
machine = IMAGE_FILE_MACHINE[c_ushort.from_buffer(data,imageDosHeader.header.e_lfanew+4).value]
if machine == IMAGE_FILE_MACHINE.I386:
classes = PE32
elif machine == IMAGE_FILE_MACHINE.AMD64:
classes = PE64
return classes
def _parseImageDosHeader(self, data):
"""Returns the ImageDosHeader"""
ioh = IMAGE_DOS_HEADER.from_buffer(data)
if ioh.e_magic != b'MZ':
raise BinaryError('No valid PE/COFF file')
return ImageDosHeaderData(header=ioh)
def _parseImageNtHeaders(self, data, imageDosHeader):
"""Returns the ImageNtHeaders"""
inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
if inth.Signature != b'PE':
raise BinaryError('No valid PE/COFF file')
return ImageNtHeaderData(header=inth)
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
"""Parses the sections in the memory and returns a list of them"""
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections
def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
"""Returns the section which contains the data of DataDirectory"""
for section in sections:
if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData :
return section
def _parseDataDirectory(self, data, sections, imageNtHeaders):
"""Parses the entries of the DataDirectory and returns a list of the content"""
data_directory_data_list = [None for i in range(15)]
# parse DataDirectory[Export]
export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
# parse DataDirectory[Import]
import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
# parse DataDirectory[LOAD_CONFIG]
loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
return data_directory_data_list
def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
"""Parses the ImportDataDirectory and returns a list of ImportDescriptorData"""
if not importSection:
return
raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
offset = 0
import_descriptors = []
while True:
import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
if import_descriptor.OriginalFirstThunk == 0:
break
else:
nameOffset = to_offset(import_descriptor.Name, importSection)
checkOffset(nameOffset, importSection)
dllName = get_str(importSection.raw, nameOffset)
import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
return import_descriptors
def _getSectionByRVA(self, va):
for section in self.sections:
address = section.header.VirtualAddress
SizeOfRawData = section.header.SizeOfRawData
if address <= va and va < (address + SizeOfRawData):
return section
return
def _parseLoadConfig(self, loadConfigEntry, loadconfigSection):
if not loadconfigSection:
return
if self._classes == PE64:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY64.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
elif self._classes == PE32:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY32.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
else:
pass
guardCFTableRVA = load_config_directory.GuardCFFunctionTable - self.imageBase
section = self._getSectionByRVA(guardCFTableRVA)
CfGuardedFunctions = set()
if section:
sectionOffset = guardCFTableRVA - section.header.VirtualAddress
# loop through the ControlFlow Guard Function table
for i in range(0, load_config_directory.GuardCFFunctionCount):
cffEntry = GUARD_CFF_ENTRY.from_buffer(section.raw, sectionOffset)
CfGuardedFunctions.add(cffEntry.rva)
sectionOffset += 5
return LoadConfigData(header=load_config_directory, cfGuardedFunctions=CfGuardedFunctions )
def __parseThunks(self, thunkRVA, importSection):
"""Parses the thunks and returns a list"""
offset = to_offset(thunkRVA, importSection)
table_offset = 0
thunks = []
while True:
thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
offset += sizeof(IMAGE_THUNK_DATA)
if thunk.Ordinal == 0:
break
thunkData = ThunkData(header=thunk, rva=table_offset+thunkRVA,ordinal=None, importByName=None)
if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
self.__parseThunkData(thunkData, importSection)
thunks.append(thunkData)
table_offset += 4
return thunks
def __parseThunkData(self, thunk,importSection):
"""Parses the data of a thunk and sets the data"""
offset = to_offset(thunk.header.AddressOfData, importSection)
if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
else:
ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
checkOffset(offset+2, importSection)
name = get_str(importSection.raw, offset+2)
thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name)
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:2] == b'MZ'
|
sashs/filebytes | filebytes/pe.py | PE._parseDataDirectoryImport | python | def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
if not importSection:
return
raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
offset = 0
import_descriptors = []
while True:
import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
if import_descriptor.OriginalFirstThunk == 0:
break
else:
nameOffset = to_offset(import_descriptor.Name, importSection)
checkOffset(nameOffset, importSection)
dllName = get_str(importSection.raw, nameOffset)
import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
return import_descriptors | Parses the ImportDataDirectory and returns a list of ImportDescriptorData | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L603-L629 | null | class PE(Binary):
def __init__(self, fileName, fileContent=None, parse_header_only=False):
super(PE, self).__init__(fileName, fileContent)
self.__imageDosHeader = self._parseImageDosHeader(self._bytes)
self.__classes = self._getSuitableClasses(self._bytes, self.imageDosHeader)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__imageNtHeaders = self._parseImageNtHeaders(self._bytes, self.imageDosHeader)
self.__sections = self._parseSections(self._bytes, self.imageDosHeader, self.imageNtHeaders, parse_header_only=parse_header_only)
if parse_header_only:
self.__dataDirectory = None
else:
self.__dataDirectory = self._parseDataDirectory(self._bytes, self.sections, self.imageNtHeaders)
@property
def _classes(self):
return self.__classes
@property
def imageDosHeader(self):
return self.__imageDosHeader
@property
def imageNtHeaders(self):
return self.__imageNtHeaders
@property
def sections(self):
return self.__sections
@property
def dataDirectory(self):
return self.__dataDirectory
@property
def entryPoint(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase + self.imageNtHeaders.header.OptionalHeader.AddressOfEntryPoint
@property
def imageBase(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase
@property
def type(self):
return 'PE'
def _getSuitableClasses(self, data, imageDosHeader):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
machine = IMAGE_FILE_MACHINE[c_ushort.from_buffer(data,imageDosHeader.header.e_lfanew+4).value]
if machine == IMAGE_FILE_MACHINE.I386:
classes = PE32
elif machine == IMAGE_FILE_MACHINE.AMD64:
classes = PE64
return classes
def _parseImageDosHeader(self, data):
"""Returns the ImageDosHeader"""
ioh = IMAGE_DOS_HEADER.from_buffer(data)
if ioh.e_magic != b'MZ':
raise BinaryError('No valid PE/COFF file')
return ImageDosHeaderData(header=ioh)
def _parseImageNtHeaders(self, data, imageDosHeader):
"""Returns the ImageNtHeaders"""
inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
if inth.Signature != b'PE':
raise BinaryError('No valid PE/COFF file')
return ImageNtHeaderData(header=inth)
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
"""Parses the sections in the memory and returns a list of them"""
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections
def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
"""Returns the section which contains the data of DataDirectory"""
for section in sections:
if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData :
return section
def _parseDataDirectory(self, data, sections, imageNtHeaders):
"""Parses the entries of the DataDirectory and returns a list of the content"""
data_directory_data_list = [None for i in range(15)]
# parse DataDirectory[Export]
export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
# parse DataDirectory[Import]
import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
# parse DataDirectory[LOAD_CONFIG]
loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
return data_directory_data_list
def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
"""Parses the EmportDataDirectory and returns an instance of ExportDirectoryData"""
if not exportSection:
return
functions = []
export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
offset = to_offset(export_directory.Name, exportSection)
checkOffset(offset, exportSection)
name = get_str(exportSection.raw, offset)
offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
offsetOfAddress = to_offset(export_directory.AddressOfFunctions, exportSection)
offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
for i in range(export_directory.NumberOfNames):
name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
name_offset = to_offset(name_address, exportSection)
checkOffset(name_offset, exportSection)
func_name = get_str(exportSection.raw, name_offset)
ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
func_addr = c_uint.from_buffer(exportSection.raw, offsetOfAddress).value
offsetOfNames += 4
offsetOfAddress += 4
offsetOfNameOrdinals += 2
functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
return ExportDirectoryData(header=export_directory, name=name, functions=functions)
def _getSectionByRVA(self, va):
for section in self.sections:
address = section.header.VirtualAddress
SizeOfRawData = section.header.SizeOfRawData
if address <= va and va < (address + SizeOfRawData):
return section
return
def _parseLoadConfig(self, loadConfigEntry, loadconfigSection):
if not loadconfigSection:
return
if self._classes == PE64:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY64.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
elif self._classes == PE32:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY32.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
else:
pass
guardCFTableRVA = load_config_directory.GuardCFFunctionTable - self.imageBase
section = self._getSectionByRVA(guardCFTableRVA)
CfGuardedFunctions = set()
if section:
sectionOffset = guardCFTableRVA - section.header.VirtualAddress
# loop through the ControlFlow Guard Function table
for i in range(0, load_config_directory.GuardCFFunctionCount):
cffEntry = GUARD_CFF_ENTRY.from_buffer(section.raw, sectionOffset)
CfGuardedFunctions.add(cffEntry.rva)
sectionOffset += 5
return LoadConfigData(header=load_config_directory, cfGuardedFunctions=CfGuardedFunctions )
def __parseThunks(self, thunkRVA, importSection):
"""Parses the thunks and returns a list"""
offset = to_offset(thunkRVA, importSection)
table_offset = 0
thunks = []
while True:
thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
offset += sizeof(IMAGE_THUNK_DATA)
if thunk.Ordinal == 0:
break
thunkData = ThunkData(header=thunk, rva=table_offset+thunkRVA,ordinal=None, importByName=None)
if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
self.__parseThunkData(thunkData, importSection)
thunks.append(thunkData)
table_offset += 4
return thunks
def __parseThunkData(self, thunk,importSection):
"""Parses the data of a thunk and sets the data"""
offset = to_offset(thunk.header.AddressOfData, importSection)
if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
else:
ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
checkOffset(offset+2, importSection)
name = get_str(importSection.raw, offset+2)
thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name)
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:2] == b'MZ'
|
sashs/filebytes | filebytes/pe.py | PE.__parseThunks | python | def __parseThunks(self, thunkRVA, importSection):
offset = to_offset(thunkRVA, importSection)
table_offset = 0
thunks = []
while True:
thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
offset += sizeof(IMAGE_THUNK_DATA)
if thunk.Ordinal == 0:
break
thunkData = ThunkData(header=thunk, rva=table_offset+thunkRVA,ordinal=None, importByName=None)
if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
self.__parseThunkData(thunkData, importSection)
thunks.append(thunkData)
table_offset += 4
return thunks | Parses the thunks and returns a list | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L672-L687 | null | class PE(Binary):
def __init__(self, fileName, fileContent=None, parse_header_only=False):
super(PE, self).__init__(fileName, fileContent)
self.__imageDosHeader = self._parseImageDosHeader(self._bytes)
self.__classes = self._getSuitableClasses(self._bytes, self.imageDosHeader)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__imageNtHeaders = self._parseImageNtHeaders(self._bytes, self.imageDosHeader)
self.__sections = self._parseSections(self._bytes, self.imageDosHeader, self.imageNtHeaders, parse_header_only=parse_header_only)
if parse_header_only:
self.__dataDirectory = None
else:
self.__dataDirectory = self._parseDataDirectory(self._bytes, self.sections, self.imageNtHeaders)
@property
def _classes(self):
return self.__classes
@property
def imageDosHeader(self):
return self.__imageDosHeader
@property
def imageNtHeaders(self):
return self.__imageNtHeaders
@property
def sections(self):
return self.__sections
@property
def dataDirectory(self):
return self.__dataDirectory
@property
def entryPoint(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase + self.imageNtHeaders.header.OptionalHeader.AddressOfEntryPoint
@property
def imageBase(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase
@property
def type(self):
return 'PE'
def _getSuitableClasses(self, data, imageDosHeader):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
machine = IMAGE_FILE_MACHINE[c_ushort.from_buffer(data,imageDosHeader.header.e_lfanew+4).value]
if machine == IMAGE_FILE_MACHINE.I386:
classes = PE32
elif machine == IMAGE_FILE_MACHINE.AMD64:
classes = PE64
return classes
def _parseImageDosHeader(self, data):
"""Returns the ImageDosHeader"""
ioh = IMAGE_DOS_HEADER.from_buffer(data)
if ioh.e_magic != b'MZ':
raise BinaryError('No valid PE/COFF file')
return ImageDosHeaderData(header=ioh)
def _parseImageNtHeaders(self, data, imageDosHeader):
"""Returns the ImageNtHeaders"""
inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
if inth.Signature != b'PE':
raise BinaryError('No valid PE/COFF file')
return ImageNtHeaderData(header=inth)
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
"""Parses the sections in the memory and returns a list of them"""
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections
def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
"""Returns the section which contains the data of DataDirectory"""
for section in sections:
if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData :
return section
def _parseDataDirectory(self, data, sections, imageNtHeaders):
"""Parses the entries of the DataDirectory and returns a list of the content"""
data_directory_data_list = [None for i in range(15)]
# parse DataDirectory[Export]
export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
# parse DataDirectory[Import]
import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
# parse DataDirectory[LOAD_CONFIG]
loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
return data_directory_data_list
def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
"""Parses the EmportDataDirectory and returns an instance of ExportDirectoryData"""
if not exportSection:
return
functions = []
export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
offset = to_offset(export_directory.Name, exportSection)
checkOffset(offset, exportSection)
name = get_str(exportSection.raw, offset)
offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
offsetOfAddress = to_offset(export_directory.AddressOfFunctions, exportSection)
offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
for i in range(export_directory.NumberOfNames):
name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
name_offset = to_offset(name_address, exportSection)
checkOffset(name_offset, exportSection)
func_name = get_str(exportSection.raw, name_offset)
ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
func_addr = c_uint.from_buffer(exportSection.raw, offsetOfAddress).value
offsetOfNames += 4
offsetOfAddress += 4
offsetOfNameOrdinals += 2
functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
return ExportDirectoryData(header=export_directory, name=name, functions=functions)
def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
"""Parses the ImportDataDirectory and returns a list of ImportDescriptorData"""
if not importSection:
return
raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
offset = 0
import_descriptors = []
while True:
import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
if import_descriptor.OriginalFirstThunk == 0:
break
else:
nameOffset = to_offset(import_descriptor.Name, importSection)
checkOffset(nameOffset, importSection)
dllName = get_str(importSection.raw, nameOffset)
import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
return import_descriptors
def _getSectionByRVA(self, va):
for section in self.sections:
address = section.header.VirtualAddress
SizeOfRawData = section.header.SizeOfRawData
if address <= va and va < (address + SizeOfRawData):
return section
return
def _parseLoadConfig(self, loadConfigEntry, loadconfigSection):
if not loadconfigSection:
return
if self._classes == PE64:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY64.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
elif self._classes == PE32:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY32.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
else:
pass
guardCFTableRVA = load_config_directory.GuardCFFunctionTable - self.imageBase
section = self._getSectionByRVA(guardCFTableRVA)
CfGuardedFunctions = set()
if section:
sectionOffset = guardCFTableRVA - section.header.VirtualAddress
# loop through the ControlFlow Guard Function table
for i in range(0, load_config_directory.GuardCFFunctionCount):
cffEntry = GUARD_CFF_ENTRY.from_buffer(section.raw, sectionOffset)
CfGuardedFunctions.add(cffEntry.rva)
sectionOffset += 5
return LoadConfigData(header=load_config_directory, cfGuardedFunctions=CfGuardedFunctions )
def __parseThunkData(self, thunk,importSection):
"""Parses the data of a thunk and sets the data"""
offset = to_offset(thunk.header.AddressOfData, importSection)
if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
else:
ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
checkOffset(offset+2, importSection)
name = get_str(importSection.raw, offset+2)
thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name)
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:2] == b'MZ'
|
sashs/filebytes | filebytes/pe.py | PE.__parseThunkData | python | def __parseThunkData(self, thunk,importSection):
offset = to_offset(thunk.header.AddressOfData, importSection)
if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
else:
ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
checkOffset(offset+2, importSection)
name = get_str(importSection.raw, offset+2)
thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name) | Parses the data of a thunk and sets the data | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L689-L699 | null | class PE(Binary):
def __init__(self, fileName, fileContent=None, parse_header_only=False):
super(PE, self).__init__(fileName, fileContent)
self.__imageDosHeader = self._parseImageDosHeader(self._bytes)
self.__classes = self._getSuitableClasses(self._bytes, self.imageDosHeader)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__imageNtHeaders = self._parseImageNtHeaders(self._bytes, self.imageDosHeader)
self.__sections = self._parseSections(self._bytes, self.imageDosHeader, self.imageNtHeaders, parse_header_only=parse_header_only)
if parse_header_only:
self.__dataDirectory = None
else:
self.__dataDirectory = self._parseDataDirectory(self._bytes, self.sections, self.imageNtHeaders)
@property
def _classes(self):
return self.__classes
@property
def imageDosHeader(self):
return self.__imageDosHeader
@property
def imageNtHeaders(self):
return self.__imageNtHeaders
@property
def sections(self):
return self.__sections
@property
def dataDirectory(self):
return self.__dataDirectory
@property
def entryPoint(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase + self.imageNtHeaders.header.OptionalHeader.AddressOfEntryPoint
@property
def imageBase(self):
return self.imageNtHeaders.header.OptionalHeader.ImageBase
@property
def type(self):
return 'PE'
def _getSuitableClasses(self, data, imageDosHeader):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
machine = IMAGE_FILE_MACHINE[c_ushort.from_buffer(data,imageDosHeader.header.e_lfanew+4).value]
if machine == IMAGE_FILE_MACHINE.I386:
classes = PE32
elif machine == IMAGE_FILE_MACHINE.AMD64:
classes = PE64
return classes
def _parseImageDosHeader(self, data):
"""Returns the ImageDosHeader"""
ioh = IMAGE_DOS_HEADER.from_buffer(data)
if ioh.e_magic != b'MZ':
raise BinaryError('No valid PE/COFF file')
return ImageDosHeaderData(header=ioh)
def _parseImageNtHeaders(self, data, imageDosHeader):
"""Returns the ImageNtHeaders"""
inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
if inth.Signature != b'PE':
raise BinaryError('No valid PE/COFF file')
return ImageNtHeaderData(header=inth)
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
"""Parses the sections in the memory and returns a list of them"""
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections
def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
"""Returns the section which contains the data of DataDirectory"""
for section in sections:
if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData :
return section
def _parseDataDirectory(self, data, sections, imageNtHeaders):
"""Parses the entries of the DataDirectory and returns a list of the content"""
data_directory_data_list = [None for i in range(15)]
# parse DataDirectory[Export]
export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
# parse DataDirectory[Import]
import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
# parse DataDirectory[LOAD_CONFIG]
loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
return data_directory_data_list
def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
"""Parses the EmportDataDirectory and returns an instance of ExportDirectoryData"""
if not exportSection:
return
functions = []
export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
offset = to_offset(export_directory.Name, exportSection)
checkOffset(offset, exportSection)
name = get_str(exportSection.raw, offset)
offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
offsetOfAddress = to_offset(export_directory.AddressOfFunctions, exportSection)
offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
for i in range(export_directory.NumberOfNames):
name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
name_offset = to_offset(name_address, exportSection)
checkOffset(name_offset, exportSection)
func_name = get_str(exportSection.raw, name_offset)
ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
func_addr = c_uint.from_buffer(exportSection.raw, offsetOfAddress).value
offsetOfNames += 4
offsetOfAddress += 4
offsetOfNameOrdinals += 2
functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
return ExportDirectoryData(header=export_directory, name=name, functions=functions)
def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
"""Parses the ImportDataDirectory and returns a list of ImportDescriptorData"""
if not importSection:
return
raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
offset = 0
import_descriptors = []
while True:
import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
if import_descriptor.OriginalFirstThunk == 0:
break
else:
nameOffset = to_offset(import_descriptor.Name, importSection)
checkOffset(nameOffset, importSection)
dllName = get_str(importSection.raw, nameOffset)
import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
return import_descriptors
def _getSectionByRVA(self, va):
for section in self.sections:
address = section.header.VirtualAddress
SizeOfRawData = section.header.SizeOfRawData
if address <= va and va < (address + SizeOfRawData):
return section
return
def _parseLoadConfig(self, loadConfigEntry, loadconfigSection):
if not loadconfigSection:
return
if self._classes == PE64:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY64.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
elif self._classes == PE32:
load_config_directory = IMAGE_LOAD_CONFIG_DIRECTORY32.from_buffer(
loadconfigSection.raw, to_offset(loadConfigEntry.VirtualAddress, loadconfigSection))
pass
else:
pass
guardCFTableRVA = load_config_directory.GuardCFFunctionTable - self.imageBase
section = self._getSectionByRVA(guardCFTableRVA)
CfGuardedFunctions = set()
if section:
sectionOffset = guardCFTableRVA - section.header.VirtualAddress
# loop through the ControlFlow Guard Function table
for i in range(0, load_config_directory.GuardCFFunctionCount):
cffEntry = GUARD_CFF_ENTRY.from_buffer(section.raw, sectionOffset)
CfGuardedFunctions.add(cffEntry.rva)
sectionOffset += 5
return LoadConfigData(header=load_config_directory, cfGuardedFunctions=CfGuardedFunctions )
def __parseThunks(self, thunkRVA, importSection):
"""Parses the thunks and returns a list"""
offset = to_offset(thunkRVA, importSection)
table_offset = 0
thunks = []
while True:
thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
offset += sizeof(IMAGE_THUNK_DATA)
if thunk.Ordinal == 0:
break
thunkData = ThunkData(header=thunk, rva=table_offset+thunkRVA,ordinal=None, importByName=None)
if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
self.__parseThunkData(thunkData, importSection)
thunks.append(thunkData)
table_offset += 4
return thunks
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:2] == b'MZ'
|
sashs/filebytes | filebytes/ctypes_helper.py | get_ptr | python | def get_ptr(data, offset=None, ptr_type=ctypes.c_void_p):
ptr = ctypes.cast(ctypes.pointer(data), ctypes.c_void_p)
if offset:
ptr = ctypes.c_void_p(ptr.value + offset)
if ptr_type != ctypes.c_void_p:
ptr = ctypes.cast(ptr, ptr_type)
return ptr | Returns a void pointer to the data | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/ctypes_helper.py#L33-L43 | null | # coding=utf-8
# Copyright 2018 Sascha Schirra
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" A ND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from struct import pack_into
import ctypes
def get_str(data, offset=None):
return get_ptr(data, offset, ctypes.c_char_p).value.decode('ASCII')
def to_ubyte_array(barray):
"""Returns a c_ubyte_array filled with the given data of a bytearray or bytes"""
bs = (ctypes.c_ubyte * len(barray))()
pack_into('%ds' % len(barray), bs, 0, barray)
return bs
|
sashs/filebytes | filebytes/ctypes_helper.py | to_ubyte_array | python | def to_ubyte_array(barray):
bs = (ctypes.c_ubyte * len(barray))()
pack_into('%ds' % len(barray), bs, 0, barray)
return bs | Returns a c_ubyte_array filled with the given data of a bytearray or bytes | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/ctypes_helper.py#L48-L53 | null | # coding=utf-8
# Copyright 2018 Sascha Schirra
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" A ND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from struct import pack_into
import ctypes
def get_ptr(data, offset=None, ptr_type=ctypes.c_void_p):
"""Returns a void pointer to the data"""
ptr = ctypes.cast(ctypes.pointer(data), ctypes.c_void_p)
if offset:
ptr = ctypes.c_void_p(ptr.value + offset)
if ptr_type != ctypes.c_void_p:
ptr = ctypes.cast(ptr, ptr_type)
return ptr
def get_str(data, offset=None):
return get_ptr(data, offset, ctypes.c_char_p).value.decode('ASCII')
|
sashs/filebytes | filebytes/binary.py | Binary._readFile | python | def _readFile(self, fileName):
with open(fileName, 'rb') as binFile:
b = binFile.read()
return to_ubyte_array(b) | Returns the bytes of the file. | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/binary.py#L71-L77 | [
"def to_ubyte_array(barray):\n \"\"\"Returns a c_ubyte_array filled with the given data of a bytearray or bytes\"\"\"\n bs = (ctypes.c_ubyte * len(barray))()\n pack_into('%ds' % len(barray), bs, 0, barray)\n\n return bs\n"
] | class Binary(object):
def __init__(self, fileName, fileContent=None):
self._bytes = to_ubyte_array(fileContent) if fileContent else self._readFile(fileName)
if not self.__class__.isSupportedContent(self._bytes):
raise BinaryError('Not a suitable filetype')
self.__fileName = fileName
@property
def fileName(self):
"""
Returns the filename
"""
return self.__fileName
@property
def entryPoint(self):
return 0x0
@property
def imageBase(self):
return 0x0
@property
def type(self):
return 'ELF'
def assertFileRange(self, value):
if type(value) == c_void_p:
value = value.value
file_data_pointer = get_ptr(self._bytes)
assert value >= (file_data_pointer.value) and value <= (
file_data_pointer.value + len(self._bytes)), 'Pointer not in file range'
@classmethod
def isSupportedFile(cls, fileName):
try:
with open(fileName, 'rb') as f:
return cls.isSupportedContent(f.read())
except BaseException as e:
raise BinaryError(e)
@classmethod
def isSupportedContent(cls, fileContent):
return False
|
sashs/filebytes | filebytes/elf.py | ELF._getSuitableClasses | python | def _getSuitableClasses(self, data):
classes = None
if data[EI.CLASS] == ELFCLASS.BITS_32:
if data[EI.DATA] == ELFDATA.LSB:
classes = LSB_32
elif data[EI.DATA] == ELFDATA.MSB:
classes = MSB_32
elif data[EI.CLASS] == ELFCLASS.BITS_64:
if data[EI.DATA] == ELFDATA.LSB:
classes = LSB_64
elif data[EI.DATA] == ELFDATA.MSB:
classes = MSB_64
return classes | Returns the class which holds the suitable classes for the loaded file | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/elf.py#L881-L896 | null | class ELF(Binary):
def __init__(self, fileName, fileContent=None):
super(ELF, self).__init__(fileName, fileContent)
self.__classes = self._getSuitableClasses(self._bytes)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__elfHeader = self._parseElfHeader(self._bytes)
self.__segments = self._parseSegments(self._bytes, self.elfHeader)
self.__sections = self._parseSections(self._bytes, self.elfHeader)
self._parseSymbols(self.sections)
self._parseDynamic(self.sections)
self._parseRelocations(self.sections)
@property
def _classes(self):
return self.__classes
@property
def elfHeader(self):
return self.__elfHeader
@property
def sections(self):
return list(self.__sections)
@property
def segments(self):
return list(self.__segments)
@property
def programHeaders(self):
return list(self.__segments)
@property
def entryPoint(self):
return self.elfHeader.header.e_entry
@property
def imageBase(self):
return self.segments[0].header.p_vaddr - self.segments[0].header.p_offset if len(self.segments) > 0 else 0
def _parseElfHeader(self, data):
"""Returns the elf header"""
ehdr = self.__classes.EHDR.from_buffer(data)
return EhdrData(header=ehdr)
def _parseSegments(self, data, elfHeader):
"""Return a list of segments"""
offset = elfHeader.header.e_phoff
segments = []
for i in range(elfHeader.header.e_phnum):
phdr = self.__classes.PHDR.from_buffer(data, offset)
segment_bytes = (c_ubyte * phdr.p_filesz).from_buffer(data, phdr.p_offset)
phdrData = PhdrData(header=phdr, raw=segment_bytes, bytes=bytearray(segment_bytes), type=PT[phdr.p_type], vaddr=phdr.p_vaddr, offset=phdr.p_offset)
segments.append(phdrData)
offset += elfHeader.header.e_phentsize
return segments
def _parseSections(self, data, elfHeader):
"""Returns a list of sections"""
offset = elfHeader.header.e_shoff
shdrs = []
for i in range(elfHeader.header.e_shnum):
shdr = self.__classes.SHDR.from_buffer(data, offset)
section_bytes = None
ba_section_bytes = None
if shdr.sh_type != SHT.NOBITS:
section_bytes = (c_ubyte * shdr.sh_size).from_buffer(data, shdr.sh_offset)
ba_section_bytes = bytearray(section_bytes)
shdrs.append(ShdrData(name=None,header=shdr, raw=section_bytes, bytes=ba_section_bytes))
offset += elfHeader.header.e_shentsize
if elfHeader.header.e_shstrndx != SHN.UNDEF:
strtab = shdrs[elfHeader.header.e_shstrndx]
strtab_offset = strtab.header.sh_offset
for section in shdrs:
section.name = get_str(strtab.raw, section.header.sh_name)
return shdrs
def _parseSymbols(self, sections):
"""Sets a list of symbols in each DYNSYM and SYMTAB section"""
for section in sections:
strtab = sections[section.header.sh_link]
if section.header.sh_type in (int(SHT.DYNSYM), int(SHT.SYMTAB)):
section.symbols = self.__parseSymbolEntriesForSection(section, strtab)
def __parseSymbolEntriesForSection(self, section, strtab):
entries = []
offset = 0
bytes_p = cast(pointer(section.raw), c_void_p)
sym_size = sizeof(self.__classes.SYM)
for i in range(int(section.header.sh_size / sym_size)):
entry = self.__classes.SYM.from_buffer(section.raw, offset)
name = get_str(strtab.raw, entry.st_name)
sym_data = SymbolData(header=entry, name=name, type=entry.st_info & 0xf, bind=entry.st_info >> 4)
entries.append(sym_data)
offset += sym_size
return entries
def _parseRelocations(self, sections):
"""Parses the relocations and add those to the section"""
for section in sections:
if section.header.sh_link != SHN.UNDEF and section.header.sh_type in (SHT.REL, SHT.RELA):
symbols = sections[section.header.sh_link].symbols
relocations = self.__parseRelocationEntries(section, symbols)
section.relocations = relocations
def __parseRelocationEntries(self, section, symbols):
struct = self.__classes.REL if section.header.sh_type == SHT.REL else self.__classes.RELA
struct_size = sizeof(struct)
offset = 0
entries = []
for i in range(int(section.header.sh_size / struct_size)):
entry = struct.from_buffer(section.raw, offset)
sym = symbols[self.__classes.R_SYM(entry.r_info)]
reloc_entry = RelocationData(header=entry, symbol=sym, type=self.__classes.R_TYPE(entry.r_info))
entries.append(reloc_entry)
offset += sizeof(struct)
return entries
def _parseDynamic(self, sections):
dyn_size = sizeof(self._classes.DYN)
for section in sections:
offset = 0
dyns = []
if section.header.sh_type == SHT.DYNAMIC:
for i in range(int(len(section.bytes) / dyn_size)):
dyn = self._classes.DYN.from_buffer(section.raw, offset)
dyns.append(DynamicData(header=dyn, tag=DT[dyn.d_tag]))
if dyn.d_tag == DT.NULL:
break
offset += dyn_size
section.content = dyns
self._parseDynamicTags(dyns, sections)
def _parseDynamicTags(self, dyns, sections):
for dyn in dyns:
if dyn.header.d_tag == DT.NEEDED:
self.__parseDynamicTagNeeded(dyn, dyns, sections)
def __parseDynamicTagNeeded(self, dyn, dyns, sections):
dyn_strtab = None
for d in dyns:
if d.header.d_tag == DT.STRTAB:
dyn_strtab = d
if not dyn_strtab:
return
for section in sections:
if section.header.sh_addr == dyn_strtab.header.d_un:
dyn.val = get_str(section.raw, dyn.header.d_un)
break
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:4] == b'\x7fELF'
|
sashs/filebytes | filebytes/elf.py | ELF._parseElfHeader | python | def _parseElfHeader(self, data):
ehdr = self.__classes.EHDR.from_buffer(data)
return EhdrData(header=ehdr) | Returns the elf header | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/elf.py#L898-L901 | null | class ELF(Binary):
def __init__(self, fileName, fileContent=None):
super(ELF, self).__init__(fileName, fileContent)
self.__classes = self._getSuitableClasses(self._bytes)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__elfHeader = self._parseElfHeader(self._bytes)
self.__segments = self._parseSegments(self._bytes, self.elfHeader)
self.__sections = self._parseSections(self._bytes, self.elfHeader)
self._parseSymbols(self.sections)
self._parseDynamic(self.sections)
self._parseRelocations(self.sections)
@property
def _classes(self):
return self.__classes
@property
def elfHeader(self):
return self.__elfHeader
@property
def sections(self):
return list(self.__sections)
@property
def segments(self):
return list(self.__segments)
@property
def programHeaders(self):
return list(self.__segments)
@property
def entryPoint(self):
return self.elfHeader.header.e_entry
@property
def imageBase(self):
return self.segments[0].header.p_vaddr - self.segments[0].header.p_offset if len(self.segments) > 0 else 0
def _getSuitableClasses(self, data):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
if data[EI.CLASS] == ELFCLASS.BITS_32:
if data[EI.DATA] == ELFDATA.LSB:
classes = LSB_32
elif data[EI.DATA] == ELFDATA.MSB:
classes = MSB_32
elif data[EI.CLASS] == ELFCLASS.BITS_64:
if data[EI.DATA] == ELFDATA.LSB:
classes = LSB_64
elif data[EI.DATA] == ELFDATA.MSB:
classes = MSB_64
return classes
def _parseSegments(self, data, elfHeader):
"""Return a list of segments"""
offset = elfHeader.header.e_phoff
segments = []
for i in range(elfHeader.header.e_phnum):
phdr = self.__classes.PHDR.from_buffer(data, offset)
segment_bytes = (c_ubyte * phdr.p_filesz).from_buffer(data, phdr.p_offset)
phdrData = PhdrData(header=phdr, raw=segment_bytes, bytes=bytearray(segment_bytes), type=PT[phdr.p_type], vaddr=phdr.p_vaddr, offset=phdr.p_offset)
segments.append(phdrData)
offset += elfHeader.header.e_phentsize
return segments
def _parseSections(self, data, elfHeader):
"""Returns a list of sections"""
offset = elfHeader.header.e_shoff
shdrs = []
for i in range(elfHeader.header.e_shnum):
shdr = self.__classes.SHDR.from_buffer(data, offset)
section_bytes = None
ba_section_bytes = None
if shdr.sh_type != SHT.NOBITS:
section_bytes = (c_ubyte * shdr.sh_size).from_buffer(data, shdr.sh_offset)
ba_section_bytes = bytearray(section_bytes)
shdrs.append(ShdrData(name=None,header=shdr, raw=section_bytes, bytes=ba_section_bytes))
offset += elfHeader.header.e_shentsize
if elfHeader.header.e_shstrndx != SHN.UNDEF:
strtab = shdrs[elfHeader.header.e_shstrndx]
strtab_offset = strtab.header.sh_offset
for section in shdrs:
section.name = get_str(strtab.raw, section.header.sh_name)
return shdrs
def _parseSymbols(self, sections):
"""Sets a list of symbols in each DYNSYM and SYMTAB section"""
for section in sections:
strtab = sections[section.header.sh_link]
if section.header.sh_type in (int(SHT.DYNSYM), int(SHT.SYMTAB)):
section.symbols = self.__parseSymbolEntriesForSection(section, strtab)
def __parseSymbolEntriesForSection(self, section, strtab):
entries = []
offset = 0
bytes_p = cast(pointer(section.raw), c_void_p)
sym_size = sizeof(self.__classes.SYM)
for i in range(int(section.header.sh_size / sym_size)):
entry = self.__classes.SYM.from_buffer(section.raw, offset)
name = get_str(strtab.raw, entry.st_name)
sym_data = SymbolData(header=entry, name=name, type=entry.st_info & 0xf, bind=entry.st_info >> 4)
entries.append(sym_data)
offset += sym_size
return entries
def _parseRelocations(self, sections):
"""Parses the relocations and add those to the section"""
for section in sections:
if section.header.sh_link != SHN.UNDEF and section.header.sh_type in (SHT.REL, SHT.RELA):
symbols = sections[section.header.sh_link].symbols
relocations = self.__parseRelocationEntries(section, symbols)
section.relocations = relocations
def __parseRelocationEntries(self, section, symbols):
struct = self.__classes.REL if section.header.sh_type == SHT.REL else self.__classes.RELA
struct_size = sizeof(struct)
offset = 0
entries = []
for i in range(int(section.header.sh_size / struct_size)):
entry = struct.from_buffer(section.raw, offset)
sym = symbols[self.__classes.R_SYM(entry.r_info)]
reloc_entry = RelocationData(header=entry, symbol=sym, type=self.__classes.R_TYPE(entry.r_info))
entries.append(reloc_entry)
offset += sizeof(struct)
return entries
def _parseDynamic(self, sections):
dyn_size = sizeof(self._classes.DYN)
for section in sections:
offset = 0
dyns = []
if section.header.sh_type == SHT.DYNAMIC:
for i in range(int(len(section.bytes) / dyn_size)):
dyn = self._classes.DYN.from_buffer(section.raw, offset)
dyns.append(DynamicData(header=dyn, tag=DT[dyn.d_tag]))
if dyn.d_tag == DT.NULL:
break
offset += dyn_size
section.content = dyns
self._parseDynamicTags(dyns, sections)
def _parseDynamicTags(self, dyns, sections):
for dyn in dyns:
if dyn.header.d_tag == DT.NEEDED:
self.__parseDynamicTagNeeded(dyn, dyns, sections)
def __parseDynamicTagNeeded(self, dyn, dyns, sections):
dyn_strtab = None
for d in dyns:
if d.header.d_tag == DT.STRTAB:
dyn_strtab = d
if not dyn_strtab:
return
for section in sections:
if section.header.sh_addr == dyn_strtab.header.d_un:
dyn.val = get_str(section.raw, dyn.header.d_un)
break
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:4] == b'\x7fELF'
|
sashs/filebytes | filebytes/elf.py | ELF._parseSegments | python | def _parseSegments(self, data, elfHeader):
offset = elfHeader.header.e_phoff
segments = []
for i in range(elfHeader.header.e_phnum):
phdr = self.__classes.PHDR.from_buffer(data, offset)
segment_bytes = (c_ubyte * phdr.p_filesz).from_buffer(data, phdr.p_offset)
phdrData = PhdrData(header=phdr, raw=segment_bytes, bytes=bytearray(segment_bytes), type=PT[phdr.p_type], vaddr=phdr.p_vaddr, offset=phdr.p_offset)
segments.append(phdrData)
offset += elfHeader.header.e_phentsize
return segments | Return a list of segments | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/elf.py#L903-L916 | null | class ELF(Binary):
def __init__(self, fileName, fileContent=None):
super(ELF, self).__init__(fileName, fileContent)
self.__classes = self._getSuitableClasses(self._bytes)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__elfHeader = self._parseElfHeader(self._bytes)
self.__segments = self._parseSegments(self._bytes, self.elfHeader)
self.__sections = self._parseSections(self._bytes, self.elfHeader)
self._parseSymbols(self.sections)
self._parseDynamic(self.sections)
self._parseRelocations(self.sections)
@property
def _classes(self):
return self.__classes
@property
def elfHeader(self):
return self.__elfHeader
@property
def sections(self):
return list(self.__sections)
@property
def segments(self):
return list(self.__segments)
@property
def programHeaders(self):
return list(self.__segments)
@property
def entryPoint(self):
return self.elfHeader.header.e_entry
@property
def imageBase(self):
return self.segments[0].header.p_vaddr - self.segments[0].header.p_offset if len(self.segments) > 0 else 0
def _getSuitableClasses(self, data):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
if data[EI.CLASS] == ELFCLASS.BITS_32:
if data[EI.DATA] == ELFDATA.LSB:
classes = LSB_32
elif data[EI.DATA] == ELFDATA.MSB:
classes = MSB_32
elif data[EI.CLASS] == ELFCLASS.BITS_64:
if data[EI.DATA] == ELFDATA.LSB:
classes = LSB_64
elif data[EI.DATA] == ELFDATA.MSB:
classes = MSB_64
return classes
def _parseElfHeader(self, data):
"""Returns the elf header"""
ehdr = self.__classes.EHDR.from_buffer(data)
return EhdrData(header=ehdr)
def _parseSections(self, data, elfHeader):
"""Returns a list of sections"""
offset = elfHeader.header.e_shoff
shdrs = []
for i in range(elfHeader.header.e_shnum):
shdr = self.__classes.SHDR.from_buffer(data, offset)
section_bytes = None
ba_section_bytes = None
if shdr.sh_type != SHT.NOBITS:
section_bytes = (c_ubyte * shdr.sh_size).from_buffer(data, shdr.sh_offset)
ba_section_bytes = bytearray(section_bytes)
shdrs.append(ShdrData(name=None,header=shdr, raw=section_bytes, bytes=ba_section_bytes))
offset += elfHeader.header.e_shentsize
if elfHeader.header.e_shstrndx != SHN.UNDEF:
strtab = shdrs[elfHeader.header.e_shstrndx]
strtab_offset = strtab.header.sh_offset
for section in shdrs:
section.name = get_str(strtab.raw, section.header.sh_name)
return shdrs
def _parseSymbols(self, sections):
"""Sets a list of symbols in each DYNSYM and SYMTAB section"""
for section in sections:
strtab = sections[section.header.sh_link]
if section.header.sh_type in (int(SHT.DYNSYM), int(SHT.SYMTAB)):
section.symbols = self.__parseSymbolEntriesForSection(section, strtab)
def __parseSymbolEntriesForSection(self, section, strtab):
entries = []
offset = 0
bytes_p = cast(pointer(section.raw), c_void_p)
sym_size = sizeof(self.__classes.SYM)
for i in range(int(section.header.sh_size / sym_size)):
entry = self.__classes.SYM.from_buffer(section.raw, offset)
name = get_str(strtab.raw, entry.st_name)
sym_data = SymbolData(header=entry, name=name, type=entry.st_info & 0xf, bind=entry.st_info >> 4)
entries.append(sym_data)
offset += sym_size
return entries
def _parseRelocations(self, sections):
"""Parses the relocations and add those to the section"""
for section in sections:
if section.header.sh_link != SHN.UNDEF and section.header.sh_type in (SHT.REL, SHT.RELA):
symbols = sections[section.header.sh_link].symbols
relocations = self.__parseRelocationEntries(section, symbols)
section.relocations = relocations
def __parseRelocationEntries(self, section, symbols):
struct = self.__classes.REL if section.header.sh_type == SHT.REL else self.__classes.RELA
struct_size = sizeof(struct)
offset = 0
entries = []
for i in range(int(section.header.sh_size / struct_size)):
entry = struct.from_buffer(section.raw, offset)
sym = symbols[self.__classes.R_SYM(entry.r_info)]
reloc_entry = RelocationData(header=entry, symbol=sym, type=self.__classes.R_TYPE(entry.r_info))
entries.append(reloc_entry)
offset += sizeof(struct)
return entries
def _parseDynamic(self, sections):
dyn_size = sizeof(self._classes.DYN)
for section in sections:
offset = 0
dyns = []
if section.header.sh_type == SHT.DYNAMIC:
for i in range(int(len(section.bytes) / dyn_size)):
dyn = self._classes.DYN.from_buffer(section.raw, offset)
dyns.append(DynamicData(header=dyn, tag=DT[dyn.d_tag]))
if dyn.d_tag == DT.NULL:
break
offset += dyn_size
section.content = dyns
self._parseDynamicTags(dyns, sections)
def _parseDynamicTags(self, dyns, sections):
for dyn in dyns:
if dyn.header.d_tag == DT.NEEDED:
self.__parseDynamicTagNeeded(dyn, dyns, sections)
def __parseDynamicTagNeeded(self, dyn, dyns, sections):
dyn_strtab = None
for d in dyns:
if d.header.d_tag == DT.STRTAB:
dyn_strtab = d
if not dyn_strtab:
return
for section in sections:
if section.header.sh_addr == dyn_strtab.header.d_un:
dyn.val = get_str(section.raw, dyn.header.d_un)
break
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:4] == b'\x7fELF'
|
sashs/filebytes | filebytes/elf.py | ELF._parseSections | python | def _parseSections(self, data, elfHeader):
offset = elfHeader.header.e_shoff
shdrs = []
for i in range(elfHeader.header.e_shnum):
shdr = self.__classes.SHDR.from_buffer(data, offset)
section_bytes = None
ba_section_bytes = None
if shdr.sh_type != SHT.NOBITS:
section_bytes = (c_ubyte * shdr.sh_size).from_buffer(data, shdr.sh_offset)
ba_section_bytes = bytearray(section_bytes)
shdrs.append(ShdrData(name=None,header=shdr, raw=section_bytes, bytes=ba_section_bytes))
offset += elfHeader.header.e_shentsize
if elfHeader.header.e_shstrndx != SHN.UNDEF:
strtab = shdrs[elfHeader.header.e_shstrndx]
strtab_offset = strtab.header.sh_offset
for section in shdrs:
section.name = get_str(strtab.raw, section.header.sh_name)
return shdrs | Returns a list of sections | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/elf.py#L918-L939 | [
"def get_str(data, offset=None):\n return get_ptr(data, offset, ctypes.c_char_p).value.decode('ASCII')\n"
] | class ELF(Binary):
def __init__(self, fileName, fileContent=None):
super(ELF, self).__init__(fileName, fileContent)
self.__classes = self._getSuitableClasses(self._bytes)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__elfHeader = self._parseElfHeader(self._bytes)
self.__segments = self._parseSegments(self._bytes, self.elfHeader)
self.__sections = self._parseSections(self._bytes, self.elfHeader)
self._parseSymbols(self.sections)
self._parseDynamic(self.sections)
self._parseRelocations(self.sections)
@property
def _classes(self):
return self.__classes
@property
def elfHeader(self):
return self.__elfHeader
@property
def sections(self):
return list(self.__sections)
@property
def segments(self):
return list(self.__segments)
@property
def programHeaders(self):
return list(self.__segments)
@property
def entryPoint(self):
return self.elfHeader.header.e_entry
@property
def imageBase(self):
return self.segments[0].header.p_vaddr - self.segments[0].header.p_offset if len(self.segments) > 0 else 0
def _getSuitableClasses(self, data):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
if data[EI.CLASS] == ELFCLASS.BITS_32:
if data[EI.DATA] == ELFDATA.LSB:
classes = LSB_32
elif data[EI.DATA] == ELFDATA.MSB:
classes = MSB_32
elif data[EI.CLASS] == ELFCLASS.BITS_64:
if data[EI.DATA] == ELFDATA.LSB:
classes = LSB_64
elif data[EI.DATA] == ELFDATA.MSB:
classes = MSB_64
return classes
def _parseElfHeader(self, data):
"""Returns the elf header"""
ehdr = self.__classes.EHDR.from_buffer(data)
return EhdrData(header=ehdr)
def _parseSegments(self, data, elfHeader):
"""Return a list of segments"""
offset = elfHeader.header.e_phoff
segments = []
for i in range(elfHeader.header.e_phnum):
phdr = self.__classes.PHDR.from_buffer(data, offset)
segment_bytes = (c_ubyte * phdr.p_filesz).from_buffer(data, phdr.p_offset)
phdrData = PhdrData(header=phdr, raw=segment_bytes, bytes=bytearray(segment_bytes), type=PT[phdr.p_type], vaddr=phdr.p_vaddr, offset=phdr.p_offset)
segments.append(phdrData)
offset += elfHeader.header.e_phentsize
return segments
def _parseSymbols(self, sections):
"""Sets a list of symbols in each DYNSYM and SYMTAB section"""
for section in sections:
strtab = sections[section.header.sh_link]
if section.header.sh_type in (int(SHT.DYNSYM), int(SHT.SYMTAB)):
section.symbols = self.__parseSymbolEntriesForSection(section, strtab)
def __parseSymbolEntriesForSection(self, section, strtab):
entries = []
offset = 0
bytes_p = cast(pointer(section.raw), c_void_p)
sym_size = sizeof(self.__classes.SYM)
for i in range(int(section.header.sh_size / sym_size)):
entry = self.__classes.SYM.from_buffer(section.raw, offset)
name = get_str(strtab.raw, entry.st_name)
sym_data = SymbolData(header=entry, name=name, type=entry.st_info & 0xf, bind=entry.st_info >> 4)
entries.append(sym_data)
offset += sym_size
return entries
def _parseRelocations(self, sections):
"""Parses the relocations and add those to the section"""
for section in sections:
if section.header.sh_link != SHN.UNDEF and section.header.sh_type in (SHT.REL, SHT.RELA):
symbols = sections[section.header.sh_link].symbols
relocations = self.__parseRelocationEntries(section, symbols)
section.relocations = relocations
def __parseRelocationEntries(self, section, symbols):
struct = self.__classes.REL if section.header.sh_type == SHT.REL else self.__classes.RELA
struct_size = sizeof(struct)
offset = 0
entries = []
for i in range(int(section.header.sh_size / struct_size)):
entry = struct.from_buffer(section.raw, offset)
sym = symbols[self.__classes.R_SYM(entry.r_info)]
reloc_entry = RelocationData(header=entry, symbol=sym, type=self.__classes.R_TYPE(entry.r_info))
entries.append(reloc_entry)
offset += sizeof(struct)
return entries
def _parseDynamic(self, sections):
dyn_size = sizeof(self._classes.DYN)
for section in sections:
offset = 0
dyns = []
if section.header.sh_type == SHT.DYNAMIC:
for i in range(int(len(section.bytes) / dyn_size)):
dyn = self._classes.DYN.from_buffer(section.raw, offset)
dyns.append(DynamicData(header=dyn, tag=DT[dyn.d_tag]))
if dyn.d_tag == DT.NULL:
break
offset += dyn_size
section.content = dyns
self._parseDynamicTags(dyns, sections)
def _parseDynamicTags(self, dyns, sections):
for dyn in dyns:
if dyn.header.d_tag == DT.NEEDED:
self.__parseDynamicTagNeeded(dyn, dyns, sections)
def __parseDynamicTagNeeded(self, dyn, dyns, sections):
dyn_strtab = None
for d in dyns:
if d.header.d_tag == DT.STRTAB:
dyn_strtab = d
if not dyn_strtab:
return
for section in sections:
if section.header.sh_addr == dyn_strtab.header.d_un:
dyn.val = get_str(section.raw, dyn.header.d_un)
break
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:4] == b'\x7fELF'
|
sashs/filebytes | filebytes/elf.py | ELF._parseSymbols | python | def _parseSymbols(self, sections):
for section in sections:
strtab = sections[section.header.sh_link]
if section.header.sh_type in (int(SHT.DYNSYM), int(SHT.SYMTAB)):
section.symbols = self.__parseSymbolEntriesForSection(section, strtab) | Sets a list of symbols in each DYNSYM and SYMTAB section | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/elf.py#L941-L946 | [
"def __parseSymbolEntriesForSection(self, section, strtab):\n entries = []\n offset = 0\n bytes_p = cast(pointer(section.raw), c_void_p)\n sym_size = sizeof(self.__classes.SYM)\n\n for i in range(int(section.header.sh_size / sym_size)):\n entry = self.__classes.SYM.from_buffer(section.raw, off... | class ELF(Binary):
def __init__(self, fileName, fileContent=None):
super(ELF, self).__init__(fileName, fileContent)
self.__classes = self._getSuitableClasses(self._bytes)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__elfHeader = self._parseElfHeader(self._bytes)
self.__segments = self._parseSegments(self._bytes, self.elfHeader)
self.__sections = self._parseSections(self._bytes, self.elfHeader)
self._parseSymbols(self.sections)
self._parseDynamic(self.sections)
self._parseRelocations(self.sections)
@property
def _classes(self):
return self.__classes
@property
def elfHeader(self):
return self.__elfHeader
@property
def sections(self):
return list(self.__sections)
@property
def segments(self):
return list(self.__segments)
@property
def programHeaders(self):
return list(self.__segments)
@property
def entryPoint(self):
return self.elfHeader.header.e_entry
@property
def imageBase(self):
return self.segments[0].header.p_vaddr - self.segments[0].header.p_offset if len(self.segments) > 0 else 0
def _getSuitableClasses(self, data):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
if data[EI.CLASS] == ELFCLASS.BITS_32:
if data[EI.DATA] == ELFDATA.LSB:
classes = LSB_32
elif data[EI.DATA] == ELFDATA.MSB:
classes = MSB_32
elif data[EI.CLASS] == ELFCLASS.BITS_64:
if data[EI.DATA] == ELFDATA.LSB:
classes = LSB_64
elif data[EI.DATA] == ELFDATA.MSB:
classes = MSB_64
return classes
def _parseElfHeader(self, data):
"""Returns the elf header"""
ehdr = self.__classes.EHDR.from_buffer(data)
return EhdrData(header=ehdr)
def _parseSegments(self, data, elfHeader):
"""Return a list of segments"""
offset = elfHeader.header.e_phoff
segments = []
for i in range(elfHeader.header.e_phnum):
phdr = self.__classes.PHDR.from_buffer(data, offset)
segment_bytes = (c_ubyte * phdr.p_filesz).from_buffer(data, phdr.p_offset)
phdrData = PhdrData(header=phdr, raw=segment_bytes, bytes=bytearray(segment_bytes), type=PT[phdr.p_type], vaddr=phdr.p_vaddr, offset=phdr.p_offset)
segments.append(phdrData)
offset += elfHeader.header.e_phentsize
return segments
def _parseSections(self, data, elfHeader):
"""Returns a list of sections"""
offset = elfHeader.header.e_shoff
shdrs = []
for i in range(elfHeader.header.e_shnum):
shdr = self.__classes.SHDR.from_buffer(data, offset)
section_bytes = None
ba_section_bytes = None
if shdr.sh_type != SHT.NOBITS:
section_bytes = (c_ubyte * shdr.sh_size).from_buffer(data, shdr.sh_offset)
ba_section_bytes = bytearray(section_bytes)
shdrs.append(ShdrData(name=None,header=shdr, raw=section_bytes, bytes=ba_section_bytes))
offset += elfHeader.header.e_shentsize
if elfHeader.header.e_shstrndx != SHN.UNDEF:
strtab = shdrs[elfHeader.header.e_shstrndx]
strtab_offset = strtab.header.sh_offset
for section in shdrs:
section.name = get_str(strtab.raw, section.header.sh_name)
return shdrs
def __parseSymbolEntriesForSection(self, section, strtab):
entries = []
offset = 0
bytes_p = cast(pointer(section.raw), c_void_p)
sym_size = sizeof(self.__classes.SYM)
for i in range(int(section.header.sh_size / sym_size)):
entry = self.__classes.SYM.from_buffer(section.raw, offset)
name = get_str(strtab.raw, entry.st_name)
sym_data = SymbolData(header=entry, name=name, type=entry.st_info & 0xf, bind=entry.st_info >> 4)
entries.append(sym_data)
offset += sym_size
return entries
def _parseRelocations(self, sections):
"""Parses the relocations and add those to the section"""
for section in sections:
if section.header.sh_link != SHN.UNDEF and section.header.sh_type in (SHT.REL, SHT.RELA):
symbols = sections[section.header.sh_link].symbols
relocations = self.__parseRelocationEntries(section, symbols)
section.relocations = relocations
def __parseRelocationEntries(self, section, symbols):
struct = self.__classes.REL if section.header.sh_type == SHT.REL else self.__classes.RELA
struct_size = sizeof(struct)
offset = 0
entries = []
for i in range(int(section.header.sh_size / struct_size)):
entry = struct.from_buffer(section.raw, offset)
sym = symbols[self.__classes.R_SYM(entry.r_info)]
reloc_entry = RelocationData(header=entry, symbol=sym, type=self.__classes.R_TYPE(entry.r_info))
entries.append(reloc_entry)
offset += sizeof(struct)
return entries
def _parseDynamic(self, sections):
dyn_size = sizeof(self._classes.DYN)
for section in sections:
offset = 0
dyns = []
if section.header.sh_type == SHT.DYNAMIC:
for i in range(int(len(section.bytes) / dyn_size)):
dyn = self._classes.DYN.from_buffer(section.raw, offset)
dyns.append(DynamicData(header=dyn, tag=DT[dyn.d_tag]))
if dyn.d_tag == DT.NULL:
break
offset += dyn_size
section.content = dyns
self._parseDynamicTags(dyns, sections)
def _parseDynamicTags(self, dyns, sections):
for dyn in dyns:
if dyn.header.d_tag == DT.NEEDED:
self.__parseDynamicTagNeeded(dyn, dyns, sections)
def __parseDynamicTagNeeded(self, dyn, dyns, sections):
dyn_strtab = None
for d in dyns:
if d.header.d_tag == DT.STRTAB:
dyn_strtab = d
if not dyn_strtab:
return
for section in sections:
if section.header.sh_addr == dyn_strtab.header.d_un:
dyn.val = get_str(section.raw, dyn.header.d_un)
break
@classmethod
def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
return bytearray(fileContent)[:4] == b'\x7fELF'
|
sashs/filebytes | filebytes/elf.py | ELF._parseRelocations | python | def _parseRelocations(self, sections):
for section in sections:
if section.header.sh_link != SHN.UNDEF and section.header.sh_type in (SHT.REL, SHT.RELA):
symbols = sections[section.header.sh_link].symbols
relocations = self.__parseRelocationEntries(section, symbols)
section.relocations = relocations | Parses the relocations and add those to the section | train | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/elf.py#L965-L971 | [
"def __parseRelocationEntries(self, section, symbols):\n struct = self.__classes.REL if section.header.sh_type == SHT.REL else self.__classes.RELA\n struct_size = sizeof(struct)\n offset = 0\n entries = []\n\n for i in range(int(section.header.sh_size / struct_size)):\n entry = struct.from_buf... | class ELF(Binary):
def __init__(self, fileName, fileContent=None):
super(ELF, self).__init__(fileName, fileContent)
self.__classes = self._getSuitableClasses(self._bytes)
if not self.__classes:
raise BinaryError('Bad architecture')
self.__elfHeader = self._parseElfHeader(self._bytes)
self.__segments = self._parseSegments(self._bytes, self.elfHeader)
self.__sections = self._parseSections(self._bytes, self.elfHeader)
self._parseSymbols(self.sections)
self._parseDynamic(self.sections)
self._parseRelocations(self.sections)
@property
def _classes(self):
return self.__classes
@property
def elfHeader(self):
return self.__elfHeader
@property
def sections(self):
return list(self.__sections)
@property
def segments(self):
return list(self.__segments)
@property
def programHeaders(self):
return list(self.__segments)
@property
def entryPoint(self):
return self.elfHeader.header.e_entry
@property
def imageBase(self):
return self.segments[0].header.p_vaddr - self.segments[0].header.p_offset if len(self.segments) > 0 else 0
def _getSuitableClasses(self, data):
"""Returns the class which holds the suitable classes for the loaded file"""
classes = None
if data[EI.CLASS] == ELFCLASS.BITS_32:
if data[EI.DATA] == ELFDATA.LSB:
classes = LSB_32
elif data[EI.DATA] == ELFDATA.MSB:
classes = MSB_32
elif data[EI.CLASS] == ELFCLASS.BITS_64:
if data[EI.DATA] == ELFDATA.LSB:
classes = LSB_64
elif data[EI.DATA] == ELFDATA.MSB:
classes = MSB_64
return classes
def _parseElfHeader(self, data):
"""Returns the elf header"""
ehdr = self.__classes.EHDR.from_buffer(data)
return EhdrData(header=ehdr)
def _parseSegments(self, data, elfHeader):
"""Return a list of segments"""
offset = elfHeader.header.e_phoff
segments = []
for i in range(elfHeader.header.e_phnum):
phdr = self.__classes.PHDR.from_buffer(data, offset)
segment_bytes = (c_ubyte * phdr.p_filesz).from_buffer(data, phdr.p_offset)
phdrData = PhdrData(header=phdr, raw=segment_bytes, bytes=bytearray(segment_bytes), type=PT[phdr.p_type], vaddr=phdr.p_vaddr, offset=phdr.p_offset)
segments.append(phdrData)
offset += elfHeader.header.e_phentsize
return segments
def _parseSections(self, data, elfHeader):
"""Returns a list of sections"""
offset = elfHeader.header.e_shoff
shdrs = []
for i in range(elfHeader.header.e_shnum):
shdr = self.__classes.SHDR.from_buffer(data, offset)
section_bytes = None
ba_section_bytes = None
if shdr.sh_type != SHT.NOBITS:
section_bytes = (c_ubyte * shdr.sh_size).from_buffer(data, shdr.sh_offset)
ba_section_bytes = bytearray(section_bytes)
shdrs.append(ShdrData(name=None,header=shdr, raw=section_bytes, bytes=ba_section_bytes))
offset += elfHeader.header.e_shentsize
if elfHeader.header.e_shstrndx != SHN.UNDEF:
strtab = shdrs[elfHeader.header.e_shstrndx]
strtab_offset = strtab.header.sh_offset
for section in shdrs:
section.name = get_str(strtab.raw, section.header.sh_name)
return shdrs
def _parseSymbols(self, sections):
"""Sets a list of symbols in each DYNSYM and SYMTAB section"""
for section in sections:
strtab = sections[section.header.sh_link]
if section.header.sh_type in (int(SHT.DYNSYM), int(SHT.SYMTAB)):
section.symbols = self.__parseSymbolEntriesForSection(section, strtab)
def __parseSymbolEntriesForSection(self, section, strtab):
entries = []
offset = 0
bytes_p = cast(pointer(section.raw), c_void_p)
sym_size = sizeof(self.__classes.SYM)
for i in range(int(section.header.sh_size / sym_size)):
entry = self.__classes.SYM.from_buffer(section.raw, offset)
name = get_str(strtab.raw, entry.st_name)
sym_data = SymbolData(header=entry, name=name, type=entry.st_info & 0xf, bind=entry.st_info >> 4)
entries.append(sym_data)
offset += sym_size
return entries
def __parseRelocationEntries(self, section, symbols):
    """Parse REL/RELA entries of *section* and bind each to its symbol.

    The struct layout (with or without an addend field) is chosen from
    the section type; the symbol index and relocation type are unpacked
    from r_info via the class-specific R_SYM/R_TYPE helpers.
    """
    struct = self.__classes.REL if section.header.sh_type == SHT.REL else self.__classes.RELA
    struct_size = sizeof(struct)
    offset = 0
    entries = []
    for i in range(int(section.header.sh_size / struct_size)):
        entry = struct.from_buffer(section.raw, offset)
        sym = symbols[self.__classes.R_SYM(entry.r_info)]
        reloc_entry = RelocationData(header=entry, symbol=sym, type=self.__classes.R_TYPE(entry.r_info))
        entries.append(reloc_entry)
        # use the hoisted size (original recomputed sizeof(struct) here)
        offset += struct_size
    return entries
def _parseDynamic(self, sections):
    """Parse every SHT.DYNAMIC section into a list of DynamicData.

    The resulting entry list is stored on ``section.content`` and then
    handed to ``_parseDynamicTags`` for tag-specific post-processing
    (e.g. resolving DT.NEEDED library names).
    """
    # NOTE(review): this method accesses self._classes while the sibling
    # parsers use self.__classes — confirm both resolve to the same object.
    dyn_size = sizeof(self._classes.DYN)
    for section in sections:
        offset = 0
        dyns = []
        if section.header.sh_type == SHT.DYNAMIC:
            for i in range(int(len(section.bytes) / dyn_size)):
                dyn = self._classes.DYN.from_buffer(section.raw, offset)
                dyns.append(DynamicData(header=dyn, tag=DT[dyn.d_tag]))
                if dyn.d_tag == DT.NULL:
                    # a NULL tag terminates the _DYNAMIC array
                    break
                offset += dyn_size
            section.content = dyns
            self._parseDynamicTags(dyns, sections)
def _parseDynamicTags(self, dyns, sections):
    """Run tag-specific post-processing over parsed dynamic entries.

    Currently only DT.NEEDED entries receive extra handling (resolving
    the referenced library name from the dynamic string table).
    """
    for entry in dyns:
        if entry.header.d_tag != DT.NEEDED:
            continue
        self.__parseDynamicTagNeeded(entry, dyns, sections)
def __parseDynamicTagNeeded(self, dyn, dyns, sections):
    """Resolve the library-name string of a DT.NEEDED entry into dyn.val.

    Scans *dyns* for the DT.STRTAB entry (the last match wins, mirroring
    the original scan order), finds the section loaded at that address
    and reads the string at the entry's d_un offset.  A missing string
    table leaves dyn.val untouched.
    """
    strtab_entry = None
    for candidate in dyns:
        if candidate.header.d_tag == DT.STRTAB:
            strtab_entry = candidate
    if not strtab_entry:
        return
    strtab_addr = strtab_entry.header.d_un
    for section in sections:
        if section.header.sh_addr != strtab_addr:
            continue
        dyn.val = get_str(section.raw, dyn.header.d_un)
        break
@classmethod
def isSupportedContent(cls, fileContent):
    """Return True when *fileContent* begins with the ELF magic bytes."""
    magic = bytearray(fileContent)[:4]
    return magic == b'\x7fELF'
|
bjodah/pycompilation | pycompilation/compilation.py | compile_sources | python | def compile_sources(files, CompilerRunner_=None,
destdir=None, cwd=None,
keep_dir_struct=False,
per_file_kwargs=None,
**kwargs):
_per_file_kwargs = {}
if per_file_kwargs is not None:
for k, v in per_file_kwargs.items():
if isinstance(k, Glob):
for path in glob.glob(k.pathname):
_per_file_kwargs[path] = v
elif isinstance(k, ArbitraryDepthGlob):
for path in glob_at_depth(k.filename, cwd):
_per_file_kwargs[path] = v
else:
_per_file_kwargs[k] = v
# Set up destination directory
destdir = destdir or '.'
if not os.path.isdir(destdir):
if os.path.exists(destdir):
raise IOError("{} is not a directory".format(destdir))
else:
make_dirs(destdir)
if cwd is None:
cwd = '.'
for f in files:
copy(f, destdir, only_update=True, dest_is_dir=True)
# Compile files and return list of paths to the objects
dstpaths = []
for f in files:
if keep_dir_struct:
name, ext = os.path.splitext(f)
else:
name, ext = os.path.splitext(os.path.basename(f))
file_kwargs = kwargs.copy()
file_kwargs.update(_per_file_kwargs.get(f, {}))
dstpaths.append(src2obj(
f, CompilerRunner_, cwd=cwd,
**file_kwargs
))
return dstpaths | Compile source code files to object files.
Parameters
----------
files: iterable of path strings
source files, if cwd is given, the paths are taken as relative.
CompilerRunner_: CompilerRunner instance (optional)
could be e.g. pycompilation.FortranCompilerRunner
Will be inferred from filename extensions if missing.
destdir: path string
output directory, if cwd is given, the path is taken as relative
cwd: path string
working directory. Specify to have compiler run in other directory.
also used as root of relative paths.
keep_dir_struct: bool
Reproduce directory structure in `destdir`. default: False
per_file_kwargs: dict
dict mapping instances in `files` to keyword arguments
**kwargs: dict
default keyword arguments to pass to CompilerRunner_ | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L85-L150 | [
"def copy(src, dst, only_update=False, copystat=True, cwd=None,\n dest_is_dir=False, create_dest_dirs=False, logger=None):\n \"\"\"\n Augmented shutil.copy with extra options and slightly\n modified behaviour\n\n Parameters\n ==========\n src: string\n path to source file\n dst: ... | # -*- coding: utf-8 -*-
"""
Motivation
==========
Distutils does not allow to use object files in compilation
(see http://bugs.python.org/issue5372)
hence the compilation of source files cannot be cached
unless doing something like what compile_sources / src2obj do.
Distutils does not support fortran out of the box (motivation of
numpy distutils), furthermore:
linking mixed C++/Fortran use either Fortran (Intel) or
C++ (GNU) compiler.
"""
from __future__ import print_function, division, absolute_import
import glob
import os
import shutil
import sys
import tempfile
import warnings
from .util import (
MetaReaderWriter, missing_or_other_newer, get_abspath,
expand_collection_in_dict, make_dirs, copy, Glob, ArbitraryDepthGlob,
glob_at_depth, CompilationError, FileNotFoundError,
import_module_from_file, pyx_is_cplus,
md5_of_string, md5_of_file
)
from .runners import (
CCompilerRunner,
CppCompilerRunner,
FortranCompilerRunner
)
from distutils.sysconfig import get_config_var
sharedext = get_config_var('SO')
if os.name == 'posix': # Future improvement to make cross-platform
# flagprefix = '-'
objext = '.o'
elif os.name == 'nt':
# flagprefix = '/' <-- let's assume mingw compilers...
objext = '.obj'
else:
raise ImportError("Unknown os.name: {}".format(os.name))
def get_mixed_fort_c_linker(vendor=None, metadir=None, cplus=False,
                            cwd=None):
    """Pick a CompilerRunner subclass able to link mixed Fortran/C(++) objects.

    The vendor is taken from (in order): the *vendor* argument, the
    COMPILER_VENDOR environment variable, or the compiler metadata file
    in *metadir*.

    Parameters
    ----------
    vendor: str or None
        'intel', 'gnu' or 'llvm' (case-insensitive).
    metadir: path string or None
        directory holding the '.metadata_CompilerRunner' file.
    cplus: bool
        whether C++ objects are among those being linked.
    cwd: path string or None
        root for a relative *metadir*.

    Returns
    -------
    (CompilerRunner subclass, extra-kwargs dict, vendor string)

    Raises
    ------
    ValueError
        if no vendor could be determined or the vendor is unknown.
    """
    vendor = vendor or os.environ.get('COMPILER_VENDOR', None)
    if not vendor:
        metadir = get_abspath(metadir or '.', cwd=cwd)
        reader = MetaReaderWriter('.metadata_CompilerRunner')
        try:
            vendor = reader.get_from_metadata_file(metadir, 'vendor')
        except FileNotFoundError:
            vendor = None
    if vendor is None:
        # previously this fell through to vendor.lower() -> AttributeError
        raise ValueError("No vendor found.")
    if vendor.lower() == 'intel':
        if cplus:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main', '-cxxlib']}, vendor)
        return (FortranCompilerRunner,
                {'flags': ['-nofor_main']}, vendor)
    elif vendor.lower() in ('gnu', 'llvm'):
        # original tested `== 'gnu' or 'llvm'`, which is always true and
        # made the error branch below unreachable
        if cplus:
            return (CppCompilerRunner,
                    {'lib_options': ['fortran']}, vendor)
        return (FortranCompilerRunner,
                {}, vendor)
    else:
        raise ValueError("No vendor found.")
def link(obj_files, out_file=None, shared=False, CompilerRunner_=None,
cwd=None, cplus=False, fort=False, **kwargs):
"""
Link object files.
Parameters
----------
obj_files: iterable of path strings
out_file: path string (optional)
path to executable/shared library, if missing
it will be deduced from the last item in obj_files.
shared: bool
Generate a shared library? default: False
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
If not given the `cplus` and `fort` flags will be inspected
(fallback is the C compiler)
cwd: path string
root of relative paths and working directory for compiler
cplus: bool
C++ objects? default: False
fort: bool
Fortran objects? default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_
Returns
-------
The absolute to the generated shared object / executable
"""
if out_file is None:
out_file, ext = os.path.splitext(os.path.basename(obj_files[-1]))
if shared:
out_file += sharedext
if not CompilerRunner_:
if fort:
CompilerRunner_, extra_kwargs, vendor = \
get_mixed_fort_c_linker(
vendor=kwargs.get('vendor', None),
metadir=kwargs.get('metadir', None),
cplus=cplus,
cwd=cwd,
)
for k, v in extra_kwargs.items():
expand_collection_in_dict(kwargs, k, v)
else:
if cplus:
CompilerRunner_ = CppCompilerRunner
else:
CompilerRunner_ = CCompilerRunner
flags = kwargs.pop('flags', [])
if shared:
if '-shared' not in flags:
flags.append('-shared')
# mimic GNU linker behavior on OS X when using -shared
# (otherwise likely Undefined symbol errors)
dl_flag = '-undefined dynamic_lookup'
if sys.platform == 'darwin' and dl_flag not in flags:
flags.append(dl_flag)
run_linker = kwargs.pop('run_linker', True)
if not run_linker:
raise ValueError("link(..., run_linker=False)!?")
out_file = get_abspath(out_file, cwd=cwd)
runner = CompilerRunner_(
obj_files, out_file, flags,
cwd=cwd,
**kwargs)
runner.run()
return out_file
def link_py_so(obj_files, so_file=None, cwd=None, libraries=None,
cplus=False, fort=False, **kwargs):
"""
Link python extension module (shared object) for importing
Parameters
----------
obj_files: iterable of path strings
object files to be linked
so_file: path string
Name (path) of shared object file to create. If
not specified it will have the basename of the last object
file in `obj_files` but with the extension '.so' (Unix) or
'.dll' (Windows).
cwd: path string
root of relative paths and working directory of linker.
libraries: iterable of strings
libraries to link against, e.g. ['m']
cplus: bool
Any C++ objects? default: False
fort: bool
Any Fortran objects? default: False
kwargs**: dict
keyword arguments passed onto `link(...)`
Returns
-------
Absolute path to the generate shared object
"""
libraries = libraries or []
include_dirs = kwargs.pop('include_dirs', [])
library_dirs = kwargs.pop('library_dirs', [])
# from distutils/command/build_ext.py:
if sys.platform == "win32":
warnings.warn("Windows not yet supported.")
elif sys.platform == 'darwin':
# Don't use the default code below
pass
elif sys.platform[:3] == 'aix':
# Don't use the default code below
pass
else:
# LIBDIR/INSTSONAME should always points to libpython (dynamic or static)
pylib = os.path.join(get_config_var('LIBDIR'), get_config_var('INSTSONAME'))
if os.path.exists(pylib):
libraries.append(pylib)
else:
if get_config_var('Py_ENABLE_SHARED'):
ABIFLAGS = get_config_var('ABIFLAGS')
pythonlib = 'python{}.{}{}'.format(
sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
ABIFLAGS or '')
libraries += [pythonlib]
else:
pass
flags = kwargs.pop('flags', [])
needed_flags = ('-pthread',)
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
# We want something like: gcc, ['-pthread', ...
# compilername, flags = cc.split()[0], cc.split()[1:]
# # Grab include_dirs
# include_dirs += list(filter(lambda x: x.startswith('-I'), flags))
# flags = list(filter(lambda x: not x.startswith('-I'), flags))
# # Grab library_dirs
# library_dirs += [x[2:] for x in filter(
# lambda x: x.startswith('-L'), flags)]
# flags = list(filter(lambda x: not x.startswith('-L'), flags))
# flags.extend(kwargs.pop('flags', []))
return link(obj_files, shared=True, flags=flags, cwd=cwd,
cplus=cplus, fort=fort, include_dirs=include_dirs,
libraries=libraries, library_dirs=library_dirs, **kwargs)
def simple_cythonize(src, destdir=None, cwd=None, logger=None,
full_module_name=None, only_update=False,
**cy_kwargs):
"""
Generates a C file from a Cython source file.
Parameters
----------
src: path string
path to Cython source
destdir: path string (optional)
Path to output directory (default: '.')
cwd: path string (optional)
Root of relative paths (default: '.')
logger: logging.Logger
info level used.
full_module_name: string
passed to cy_compile (default: None)
only_update: bool
Only cythonize if source is newer. default: False
**cy_kwargs:
second argument passed to cy_compile.
Generates a .cpp file if cplus=True in cy_kwargs, else a .c file.
"""
from Cython.Compiler.Main import (
default_options, CompilationOptions
)
from Cython.Compiler.Main import compile as cy_compile
assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
cwd = cwd or '.'
destdir = destdir or '.'
ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
c_name = os.path.splitext(os.path.basename(src))[0] + ext
dstfile = os.path.join(destdir, c_name)
if only_update:
if not missing_or_other_newer(dstfile, src, cwd=cwd):
msg = '{0} newer than {1}, did not re-cythonize.'.format(
dstfile, src)
if logger:
logger.info(msg)
else:
print(msg)
return dstfile
if cwd:
ori_dir = os.getcwd()
else:
ori_dir = '.'
os.chdir(cwd)
try:
cy_options = CompilationOptions(default_options)
cy_options.__dict__.update(cy_kwargs)
if logger:
logger.info("Cythonizing {0} to {1}".format(
src, dstfile))
cy_result = cy_compile([src], cy_options, full_module_name=full_module_name)
if cy_result.num_errors > 0:
raise ValueError("Cython compilation failed.")
if os.path.abspath(os.path.dirname(
src)) != os.path.abspath(destdir):
if os.path.exists(dstfile):
os.unlink(dstfile)
shutil.move(os.path.join(os.path.dirname(src), c_name),
destdir)
finally:
os.chdir(ori_dir)
return dstfile
extension_mapping = {
'.c': (CCompilerRunner, None),
'.cpp': (CppCompilerRunner, None),
'.cxx': (CppCompilerRunner, None),
'.f': (FortranCompilerRunner, None),
'.for': (FortranCompilerRunner, None),
'.ftn': (FortranCompilerRunner, None),
'.f90': (FortranCompilerRunner, 'f2008'), # ifort only knows about .f90
'.f95': (FortranCompilerRunner, 'f95'),
'.f03': (FortranCompilerRunner, 'f2003'),
'.f08': (FortranCompilerRunner, 'f2008'),
}
def src2obj(srcpath, CompilerRunner_=None, objpath=None,
only_update=False, cwd=None, out_ext=None, inc_py=False,
**kwargs):
"""
Compiles a source code file to an object file.
Files ending with '.pyx' assumed to be cython files and
are dispatched to pyx2obj.
Parameters
----------
srcpath: path string
path to source file
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
Default: deduced from extension of srcpath
objpath: path string (optional)
path to generated object. default: deduced from srcpath
only_update: bool
only compile if source is newer than objpath. default: False
cwd: path string (optional)
working directory and root of relative paths. default: current dir.
out_ext: string
set when objpath is a dir and you want to override defaults
('.o'/'.obj' for Unix/Windows).
inc_py: bool
add Python include path to include_dirs. default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_ or pyx2obj
"""
name, ext = os.path.splitext(os.path.basename(srcpath))
if objpath is None:
if os.path.isabs(srcpath):
objpath = '.'
else:
objpath = os.path.dirname(srcpath)
objpath = objpath or '.' # avoid objpath == ''
out_ext = out_ext or objext
if os.path.isdir(objpath):
objpath = os.path.join(objpath, name+out_ext)
include_dirs = kwargs.pop('include_dirs', [])
if inc_py:
from distutils.sysconfig import get_python_inc
py_inc_dir = get_python_inc()
if py_inc_dir not in include_dirs:
include_dirs.append(py_inc_dir)
if ext.lower() == '.pyx':
return pyx2obj(srcpath, objpath=objpath,
include_dirs=include_dirs, cwd=cwd,
only_update=only_update, **kwargs)
if CompilerRunner_ is None:
CompilerRunner_, std = extension_mapping[ext.lower()]
if 'std' not in kwargs:
kwargs['std'] = std
# src2obj implies not running the linker...
run_linker = kwargs.pop('run_linker', False)
if run_linker:
raise CompilationError("src2obj called with run_linker=True")
if only_update:
if not missing_or_other_newer(objpath, srcpath, cwd=cwd):
msg = "Found {0}, did not recompile.".format(objpath)
if kwargs.get('logger', None):
kwargs['logger'].info(msg)
else:
print(msg)
return objpath
runner = CompilerRunner_(
[srcpath], objpath, include_dirs=include_dirs,
run_linker=run_linker, cwd=cwd, **kwargs)
runner.run()
return objpath
def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None,
logger=None, full_module_name=None, only_update=False,
metadir=None, include_numpy=False, include_dirs=None,
cy_kwargs=None, gdb=False, cplus=None, **kwargs):
"""
Convenience function
If cwd is specified, pyxpath and dst are taken to be relative
If only_update is set to `True` the modification time is checked
and compilation is only run if the source is newer than the
destination
Parameters
----------
pyxpath: path string
path to Cython source file
objpath: path string (optional)
path to object file to generate
interm_c_dir: path string (optional)
directory to put generated C file.
cwd: path string (optional)
working directory and root of relative paths
logger: logging.Logger (optional)
passed onto `simple_cythonize` and `src2obj`
full_module_name: string (optional)
passed onto `simple_cythonize`
only_update: bool (optional)
passed onto `simple_cythonize` and `src2obj`
metadir: path string (optional)
passed onto src2obj
include_numpy: bool (optional)
Add numpy include directory to include_dirs. default: False
include_dirs: iterable of path strings (optional)
Passed onto src2obj and via cy_kwargs['include_path']
to simple_cythonize.
cy_kwargs: dict (optional)
keyword arguments passed onto `simple_cythonize`
gdb: bool (optional)
convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
default: False
cplus: bool (optional)
Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
**kwargs: dict
keyword arguments passed onto src2obj
Returns
-------
Absolute path of generated object file.
"""
assert pyxpath.endswith('.pyx')
cwd = cwd or '.'
objpath = objpath or '.'
interm_c_dir = interm_c_dir or os.path.dirname(objpath)
abs_objpath = get_abspath(objpath, cwd=cwd)
if os.path.isdir(abs_objpath):
pyx_fname = os.path.basename(pyxpath)
name, ext = os.path.splitext(pyx_fname)
objpath = os.path.join(objpath, name+objext)
cy_kwargs = cy_kwargs or {}
cy_kwargs['output_dir'] = cwd
if cplus is None:
cplus = pyx_is_cplus(pyxpath)
cy_kwargs['cplus'] = cplus
if gdb:
cy_kwargs['gdb_debug'] = True
if include_dirs:
cy_kwargs['include_path'] = include_dirs
interm_c_file = simple_cythonize(
pyxpath, destdir=interm_c_dir,
cwd=cwd, logger=logger,
full_module_name=full_module_name,
only_update=only_update, **cy_kwargs)
include_dirs = include_dirs or []
if include_numpy:
import numpy
numpy_inc_dir = numpy.get_include()
if numpy_inc_dir not in include_dirs:
include_dirs.append(numpy_inc_dir)
flags = kwargs.pop('flags', [])
needed_flags = ('-fwrapv', '-pthread')
if not cplus:
needed_flags += ('-Wstrict-prototypes',) # not really needed..
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
options = kwargs.pop('options', [])
if kwargs.pop('strict_aliasing', False):
raise CompilationError("Cython req. strict aliasing to be disabled.")
if 'pic' not in options:
options.append('pic')
if 'warn' not in options:
options.append('warn')
# Let's be explicit about standard
if cplus:
std = kwargs.pop('std', 'c++98')
else:
std = kwargs.pop('std', 'c99')
return src2obj(
interm_c_file,
objpath=objpath,
cwd=cwd,
only_update=only_update,
metadir=metadir,
include_dirs=include_dirs,
flags=flags,
std=std,
options=options,
logger=logger,
inc_py=True,
strict_aliasing=False,
**kwargs)
def _any_X(srcs, cls):
for src in srcs:
name, ext = os.path.splitext(src)
key = ext.lower()
if key in extension_mapping:
if extension_mapping[key][0] == cls:
return True
return False
def any_fort(srcs):
    """Return True if any source path in *srcs* maps to the Fortran compiler."""
    return _any_X(srcs, FortranCompilerRunner)
def any_cplus(srcs):
    """Return True if any source path in *srcs* maps to the C++ compiler."""
    return _any_X(srcs, CppCompilerRunner)
def compile_link_import_py_ext(
srcs, extname=None, build_dir=None, compile_kwargs=None,
link_kwargs=None, **kwargs):
"""
Compiles sources in `srcs` to a shared object (python extension)
which is imported. If shared object is newer than the sources, they
are not recompiled but instead it is imported.
Parameters
----------
srcs: string
list of paths to sources
extname: string
name of extension (default: None)
(taken from the last file in `srcs` - without extension)
build_dir: string
path to directory in which objects files etc. are generated
compile_kwargs: dict
keyword arguments passed to compile_sources
link_kwargs: dict
keyword arguments passed to link_py_so
**kwargs:
additional keyword arguments overwrites to both compile_kwargs
and link_kwargs useful for convenience e.g. when passing logger
Returns
-------
the imported module
Examples
--------
>>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
'fft_wrapper.pyx'], only_update=True) # doctest: +SKIP
>>> Aprim = mod.fft(A) # doctest: +SKIP
"""
build_dir = build_dir or '.'
if extname is None:
extname = os.path.splitext(os.path.basename(srcs[-1]))[0]
compile_kwargs = compile_kwargs or {}
compile_kwargs.update(kwargs)
link_kwargs = link_kwargs or {}
link_kwargs.update(kwargs)
try:
mod = import_module_from_file(os.path.join(build_dir, extname), srcs)
except ImportError:
objs = compile_sources(list(map(get_abspath, srcs)), destdir=build_dir,
cwd=build_dir, **compile_kwargs)
so = link_py_so(
objs, cwd=build_dir, fort=any_fort(srcs), cplus=any_cplus(srcs),
**link_kwargs)
mod = import_module_from_file(so)
return mod
def compile_link_import_strings(codes, build_dir=None, **kwargs):
    """
    Dumps, compiles and links provided source code and imports the result.

    Parameters
    ----------
    codes: iterable of name/source pair tuples
    build_dir: string (default: None)
        path to cache_dir. None implies use a temporary directory.
    **kwargs:
        keyword arguments passed onto `compile_link_import_py_ext`;
        ``logger=True`` is a convenience for a default DEBUG logger,
        ``only_update`` (default True) skips re-writing unchanged sources
        based on a cached md5 checksum.

    Returns
    -------
    The imported module (see `compile_link_import_py_ext`).

    Raises
    ------
    OSError
        if *build_dir* is given but is not an existing directory.
    """
    build_dir = build_dir or tempfile.mkdtemp()
    if not os.path.isdir(build_dir):
        # original passed two args to OSError, producing a tuple message
        raise OSError("Non-existent directory: {}".format(build_dir))
    source_files = []
    if kwargs.get('logger', False) is True:
        # logger=True (exactly the bool) means "set up a default logger"
        import logging
        logging.basicConfig(level=logging.DEBUG)
        kwargs['logger'] = logging.getLogger()
    only_update = kwargs.get('only_update', True)
    for name, code_ in codes:
        dest = os.path.join(build_dir, name)
        differs = True
        md5_in_mem = md5_of_string(code_.encode('utf-8')).hexdigest()
        if only_update and os.path.exists(dest):
            if os.path.exists(dest+'.md5'):
                # compare against the checksum cached on the last dump
                with open(dest+'.md5', 'rt') as fh:  # was leaked (no close)
                    md5_on_disk = fh.read()
            else:
                md5_on_disk = md5_of_file(dest).hexdigest()
            differs = md5_on_disk != md5_in_mem
        if not only_update or differs:
            with open(dest, 'wt') as fh:
                fh.write(code_)
            with open(dest+'.md5', 'wt') as fh:  # was leaked (no close)
                fh.write(md5_in_mem)
        source_files.append(dest)
    return compile_link_import_py_ext(
        source_files, build_dir=build_dir, **kwargs)
|
bjodah/pycompilation | pycompilation/compilation.py | link | python | def link(obj_files, out_file=None, shared=False, CompilerRunner_=None,
cwd=None, cplus=False, fort=False, **kwargs):
if out_file is None:
out_file, ext = os.path.splitext(os.path.basename(obj_files[-1]))
if shared:
out_file += sharedext
if not CompilerRunner_:
if fort:
CompilerRunner_, extra_kwargs, vendor = \
get_mixed_fort_c_linker(
vendor=kwargs.get('vendor', None),
metadir=kwargs.get('metadir', None),
cplus=cplus,
cwd=cwd,
)
for k, v in extra_kwargs.items():
expand_collection_in_dict(kwargs, k, v)
else:
if cplus:
CompilerRunner_ = CppCompilerRunner
else:
CompilerRunner_ = CCompilerRunner
flags = kwargs.pop('flags', [])
if shared:
if '-shared' not in flags:
flags.append('-shared')
# mimic GNU linker behavior on OS X when using -shared
# (otherwise likely Undefined symbol errors)
dl_flag = '-undefined dynamic_lookup'
if sys.platform == 'darwin' and dl_flag not in flags:
flags.append(dl_flag)
run_linker = kwargs.pop('run_linker', True)
if not run_linker:
raise ValueError("link(..., run_linker=False)!?")
out_file = get_abspath(out_file, cwd=cwd)
runner = CompilerRunner_(
obj_files, out_file, flags,
cwd=cwd,
**kwargs)
runner.run()
return out_file | Link object files.
Parameters
----------
obj_files: iterable of path strings
out_file: path string (optional)
path to executable/shared library, if missing
it will be deduced from the last item in obj_files.
shared: bool
Generate a shared library? default: False
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
If not given the `cplus` and `fort` flags will be inspected
(fallback is the C compiler)
cwd: path string
root of relative paths and working directory for compiler
cplus: bool
C++ objects? default: False
fort: bool
Fortran objects? default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_
Returns
-------
The absolute to the generated shared object / executable | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L153-L225 | [
"def get_abspath(path, cwd=None):\n if os.path.isabs(path):\n return path\n else:\n cwd = cwd or '.'\n if not os.path.isabs(cwd):\n cwd = os.path.abspath(cwd)\n return os.path.abspath(\n os.path.join(cwd, path)\n )\n",
"def expand_collection_in_dict(d... | # -*- coding: utf-8 -*-
"""
Motivation
==========
Distutils does not allow to use object files in compilation
(see http://bugs.python.org/issue5372)
hence the compilation of source files cannot be cached
unless doing something like what compile_sources / src2obj do.
Distutils does not support fortran out of the box (motivation of
numpy distutils), furthermore:
linking mixed C++/Fortran use either Fortran (Intel) or
C++ (GNU) compiler.
"""
from __future__ import print_function, division, absolute_import
import glob
import os
import shutil
import sys
import tempfile
import warnings
from .util import (
MetaReaderWriter, missing_or_other_newer, get_abspath,
expand_collection_in_dict, make_dirs, copy, Glob, ArbitraryDepthGlob,
glob_at_depth, CompilationError, FileNotFoundError,
import_module_from_file, pyx_is_cplus,
md5_of_string, md5_of_file
)
from .runners import (
CCompilerRunner,
CppCompilerRunner,
FortranCompilerRunner
)
from distutils.sysconfig import get_config_var
sharedext = get_config_var('SO')
if os.name == 'posix': # Future improvement to make cross-platform
# flagprefix = '-'
objext = '.o'
elif os.name == 'nt':
# flagprefix = '/' <-- let's assume mingw compilers...
objext = '.obj'
else:
raise ImportError("Unknown os.name: {}".format(os.name))
def get_mixed_fort_c_linker(vendor=None, metadir=None, cplus=False,
                            cwd=None):
    """Pick a CompilerRunner subclass able to link mixed Fortran/C(++) objects.

    The vendor is taken from (in order): the *vendor* argument, the
    COMPILER_VENDOR environment variable, or the compiler metadata file
    in *metadir*.

    Parameters
    ----------
    vendor: str or None
        'intel', 'gnu' or 'llvm' (case-insensitive).
    metadir: path string or None
        directory holding the '.metadata_CompilerRunner' file.
    cplus: bool
        whether C++ objects are among those being linked.
    cwd: path string or None
        root for a relative *metadir*.

    Returns
    -------
    (CompilerRunner subclass, extra-kwargs dict, vendor string)

    Raises
    ------
    ValueError
        if no vendor could be determined or the vendor is unknown.
    """
    vendor = vendor or os.environ.get('COMPILER_VENDOR', None)
    if not vendor:
        metadir = get_abspath(metadir or '.', cwd=cwd)
        reader = MetaReaderWriter('.metadata_CompilerRunner')
        try:
            vendor = reader.get_from_metadata_file(metadir, 'vendor')
        except FileNotFoundError:
            vendor = None
    if vendor is None:
        # previously this fell through to vendor.lower() -> AttributeError
        raise ValueError("No vendor found.")
    if vendor.lower() == 'intel':
        if cplus:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main', '-cxxlib']}, vendor)
        return (FortranCompilerRunner,
                {'flags': ['-nofor_main']}, vendor)
    elif vendor.lower() in ('gnu', 'llvm'):
        # original tested `== 'gnu' or 'llvm'`, which is always true and
        # made the error branch below unreachable
        if cplus:
            return (CppCompilerRunner,
                    {'lib_options': ['fortran']}, vendor)
        return (FortranCompilerRunner,
                {}, vendor)
    else:
        raise ValueError("No vendor found.")
def compile_sources(files, CompilerRunner_=None,
destdir=None, cwd=None,
keep_dir_struct=False,
per_file_kwargs=None,
**kwargs):
"""
Compile source code files to object files.
Parameters
----------
files: iterable of path strings
source files, if cwd is given, the paths are taken as relative.
CompilerRunner_: CompilerRunner instance (optional)
could be e.g. pycompilation.FortranCompilerRunner
Will be inferred from filename extensions if missing.
destdir: path string
output directory, if cwd is given, the path is taken as relative
cwd: path string
working directory. Specify to have compiler run in other directory.
also used as root of relative paths.
keep_dir_struct: bool
Reproduce directory structure in `destdir`. default: False
per_file_kwargs: dict
dict mapping instances in `files` to keyword arguments
**kwargs: dict
default keyword arguments to pass to CompilerRunner_
"""
_per_file_kwargs = {}
if per_file_kwargs is not None:
for k, v in per_file_kwargs.items():
if isinstance(k, Glob):
for path in glob.glob(k.pathname):
_per_file_kwargs[path] = v
elif isinstance(k, ArbitraryDepthGlob):
for path in glob_at_depth(k.filename, cwd):
_per_file_kwargs[path] = v
else:
_per_file_kwargs[k] = v
# Set up destination directory
destdir = destdir or '.'
if not os.path.isdir(destdir):
if os.path.exists(destdir):
raise IOError("{} is not a directory".format(destdir))
else:
make_dirs(destdir)
if cwd is None:
cwd = '.'
for f in files:
copy(f, destdir, only_update=True, dest_is_dir=True)
# Compile files and return list of paths to the objects
dstpaths = []
for f in files:
if keep_dir_struct:
name, ext = os.path.splitext(f)
else:
name, ext = os.path.splitext(os.path.basename(f))
file_kwargs = kwargs.copy()
file_kwargs.update(_per_file_kwargs.get(f, {}))
dstpaths.append(src2obj(
f, CompilerRunner_, cwd=cwd,
**file_kwargs
))
return dstpaths
def link_py_so(obj_files, so_file=None, cwd=None, libraries=None,
cplus=False, fort=False, **kwargs):
"""
Link python extension module (shared object) for importing
Parameters
----------
obj_files: iterable of path strings
object files to be linked
so_file: path string
Name (path) of shared object file to create. If
not specified it will have the basename of the last object
file in `obj_files` but with the extension '.so' (Unix) or
'.dll' (Windows).
cwd: path string
root of relative paths and working directory of linker.
libraries: iterable of strings
libraries to link against, e.g. ['m']
cplus: bool
Any C++ objects? default: False
fort: bool
Any Fortran objects? default: False
kwargs**: dict
keyword arguments passed onto `link(...)`
Returns
-------
Absolute path to the generate shared object
"""
libraries = libraries or []
include_dirs = kwargs.pop('include_dirs', [])
library_dirs = kwargs.pop('library_dirs', [])
# from distutils/command/build_ext.py:
if sys.platform == "win32":
warnings.warn("Windows not yet supported.")
elif sys.platform == 'darwin':
# Don't use the default code below
pass
elif sys.platform[:3] == 'aix':
# Don't use the default code below
pass
else:
# LIBDIR/INSTSONAME should always points to libpython (dynamic or static)
pylib = os.path.join(get_config_var('LIBDIR'), get_config_var('INSTSONAME'))
if os.path.exists(pylib):
libraries.append(pylib)
else:
if get_config_var('Py_ENABLE_SHARED'):
ABIFLAGS = get_config_var('ABIFLAGS')
pythonlib = 'python{}.{}{}'.format(
sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
ABIFLAGS or '')
libraries += [pythonlib]
else:
pass
flags = kwargs.pop('flags', [])
needed_flags = ('-pthread',)
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
# We want something like: gcc, ['-pthread', ...
# compilername, flags = cc.split()[0], cc.split()[1:]
# # Grab include_dirs
# include_dirs += list(filter(lambda x: x.startswith('-I'), flags))
# flags = list(filter(lambda x: not x.startswith('-I'), flags))
# # Grab library_dirs
# library_dirs += [x[2:] for x in filter(
# lambda x: x.startswith('-L'), flags)]
# flags = list(filter(lambda x: not x.startswith('-L'), flags))
# flags.extend(kwargs.pop('flags', []))
return link(obj_files, shared=True, flags=flags, cwd=cwd,
cplus=cplus, fort=fort, include_dirs=include_dirs,
libraries=libraries, library_dirs=library_dirs, **kwargs)
def simple_cythonize(src, destdir=None, cwd=None, logger=None,
full_module_name=None, only_update=False,
**cy_kwargs):
"""
Generates a C file from a Cython source file.
Parameters
----------
src: path string
path to Cython source
destdir: path string (optional)
Path to output directory (default: '.')
cwd: path string (optional)
Root of relative paths (default: '.')
logger: logging.Logger
info level used.
full_module_name: string
passed to cy_compile (default: None)
only_update: bool
Only cythonize if source is newer. default: False
**cy_kwargs:
second argument passed to cy_compile.
Generates a .cpp file if cplus=True in cy_kwargs, else a .c file.
"""
from Cython.Compiler.Main import (
default_options, CompilationOptions
)
from Cython.Compiler.Main import compile as cy_compile
assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
cwd = cwd or '.'
destdir = destdir or '.'
ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
c_name = os.path.splitext(os.path.basename(src))[0] + ext
dstfile = os.path.join(destdir, c_name)
if only_update:
if not missing_or_other_newer(dstfile, src, cwd=cwd):
msg = '{0} newer than {1}, did not re-cythonize.'.format(
dstfile, src)
if logger:
logger.info(msg)
else:
print(msg)
return dstfile
if cwd:
ori_dir = os.getcwd()
else:
ori_dir = '.'
os.chdir(cwd)
try:
cy_options = CompilationOptions(default_options)
cy_options.__dict__.update(cy_kwargs)
if logger:
logger.info("Cythonizing {0} to {1}".format(
src, dstfile))
cy_result = cy_compile([src], cy_options, full_module_name=full_module_name)
if cy_result.num_errors > 0:
raise ValueError("Cython compilation failed.")
if os.path.abspath(os.path.dirname(
src)) != os.path.abspath(destdir):
if os.path.exists(dstfile):
os.unlink(dstfile)
shutil.move(os.path.join(os.path.dirname(src), c_name),
destdir)
finally:
os.chdir(ori_dir)
return dstfile
extension_mapping = {
'.c': (CCompilerRunner, None),
'.cpp': (CppCompilerRunner, None),
'.cxx': (CppCompilerRunner, None),
'.f': (FortranCompilerRunner, None),
'.for': (FortranCompilerRunner, None),
'.ftn': (FortranCompilerRunner, None),
'.f90': (FortranCompilerRunner, 'f2008'), # ifort only knows about .f90
'.f95': (FortranCompilerRunner, 'f95'),
'.f03': (FortranCompilerRunner, 'f2003'),
'.f08': (FortranCompilerRunner, 'f2008'),
}
def src2obj(srcpath, CompilerRunner_=None, objpath=None,
            only_update=False, cwd=None, out_ext=None, inc_py=False,
            **kwargs):
    """
    Compiles a source code file to an object file.

    Files ending with '.pyx' assumed to be cython files and
    are dispatched to pyx2obj.

    Parameters
    ----------
    srcpath: path string
        path to source file
    CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
        Default: deduced from extension of srcpath
    objpath: path string (optional)
        path to generated object. default: deduced from srcpath
    only_update: bool
        only compile if source is newer than objpath. default: False
    cwd: path string (optional)
        working directory and root of relative paths. default: current dir.
    out_ext: string
        set when objpath is a dir and you want to override defaults
        ('.o'/'.obj' for Unix/Windows).
    inc_py: bool
        add Python include path to include_dirs. default: False
    **kwargs: dict
        keyword arguments passed onto CompilerRunner_ or pyx2obj

    Returns
    -------
    ``objpath`` — the path to the (possibly freshly) generated object file.

    Raises
    ------
    CompilationError
        if ``run_linker=True`` is passed (linking is not this function's job).
    KeyError
        if the file extension is not present in ``extension_mapping``.
    """
    name, ext = os.path.splitext(os.path.basename(srcpath))
    if objpath is None:
        if os.path.isabs(srcpath):
            objpath = '.'
        else:
            objpath = os.path.dirname(srcpath)
            objpath = objpath or '.'  # avoid objpath == ''
    out_ext = out_ext or objext
    if os.path.isdir(objpath):
        # Caller gave a directory: derive the object filename from the source.
        objpath = os.path.join(objpath, name+out_ext)
    include_dirs = kwargs.pop('include_dirs', [])
    if inc_py:
        from distutils.sysconfig import get_python_inc
        py_inc_dir = get_python_inc()
        if py_inc_dir not in include_dirs:
            include_dirs.append(py_inc_dir)
    if ext.lower() == '.pyx':
        # Cython sources are cythonized first; pyx2obj calls back into
        # src2obj with the generated C/C++ file.  Note: kwargs such as
        # 'run_linker' have deliberately NOT been popped yet at this point.
        return pyx2obj(srcpath, objpath=objpath,
                       include_dirs=include_dirs, cwd=cwd,
                       only_update=only_update, **kwargs)
    if CompilerRunner_ is None:
        # Deduce compiler class (and default language standard) from extension.
        CompilerRunner_, std = extension_mapping[ext.lower()]
        if 'std' not in kwargs:
            kwargs['std'] = std
    # src2obj implies not running the linker...
    run_linker = kwargs.pop('run_linker', False)
    if run_linker:
        raise CompilationError("src2obj called with run_linker=True")
    if only_update:
        if not missing_or_other_newer(objpath, srcpath, cwd=cwd):
            # Object file is up to date: report and skip the compilation.
            msg = "Found {0}, did not recompile.".format(objpath)
            if kwargs.get('logger', None):
                kwargs['logger'].info(msg)
            else:
                print(msg)
            return objpath
    runner = CompilerRunner_(
        [srcpath], objpath, include_dirs=include_dirs,
        run_linker=run_linker, cwd=cwd, **kwargs)
    runner.run()
    return objpath
def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None,
            logger=None, full_module_name=None, only_update=False,
            metadir=None, include_numpy=False, include_dirs=None,
            cy_kwargs=None, gdb=False, cplus=None, **kwargs):
    """
    Convenience function: cythonize a .pyx file and compile the generated
    C/C++ source to an object file in one call.

    If cwd is specified, pyxpath and dst are taken to be relative.
    If only_update is set to `True` the modification time is checked
    and compilation is only run if the source is newer than the
    destination.

    Parameters
    ----------
    pyxpath: path string
        path to Cython source file
    objpath: path string (optional)
        path to object file to generate. default: current directory
    interm_c_dir: path string (optional)
        directory to put generated C file. default: directory of objpath
    cwd: path string (optional)
        working directory and root of relative paths
    logger: logging.Logger (optional)
        passed onto `simple_cythonize` and `src2obj`
    full_module_name: string (optional)
        passed onto `simple_cythonize`
    only_update: bool (optional)
        passed onto `simple_cythonize` and `src2obj`
    metadir: path string (optional)
        passed onto src2obj
    include_numpy: bool (optional)
        Add numpy include directory to include_dirs. default: False
    include_dirs: iterable of path strings (optional)
        Passed onto src2obj and via cy_kwargs['include_path']
        to simple_cythonize.
    cy_kwargs: dict (optional)
        keyword arguments passed onto `simple_cythonize`
    gdb: bool (optional)
        convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
        default: False
    cplus: bool (optional)
        Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
    **kwargs: dict
        keyword arguments passed onto src2obj

    Returns
    -------
    Path of the generated object file. NOTE(review): the historical docs
    claimed an absolute path, but src2obj returns ``objpath`` as given, so
    a relative ``objpath`` yields a relative result — confirm with callers.
    """
    assert pyxpath.endswith('.pyx')
    cwd = cwd or '.'
    objpath = objpath or '.'
    interm_c_dir = interm_c_dir or os.path.dirname(objpath)
    abs_objpath = get_abspath(objpath, cwd=cwd)
    if os.path.isdir(abs_objpath):
        # A directory was given: name the object after the .pyx file.
        pyx_fname = os.path.basename(pyxpath)
        name, ext = os.path.splitext(pyx_fname)
        objpath = os.path.join(objpath, name+objext)
    cy_kwargs = cy_kwargs or {}
    cy_kwargs['output_dir'] = cwd
    if cplus is None:
        # presumably inspects the .pyx source for C++ markers — see
        # util.pyx_is_cplus for the exact heuristic.
        cplus = pyx_is_cplus(pyxpath)
    cy_kwargs['cplus'] = cplus
    if gdb:
        cy_kwargs['gdb_debug'] = True
    if include_dirs:
        cy_kwargs['include_path'] = include_dirs
    interm_c_file = simple_cythonize(
        pyxpath, destdir=interm_c_dir,
        cwd=cwd, logger=logger,
        full_module_name=full_module_name,
        only_update=only_update, **cy_kwargs)
    include_dirs = include_dirs or []
    if include_numpy:
        import numpy
        numpy_inc_dir = numpy.get_include()
        if numpy_inc_dir not in include_dirs:
            include_dirs.append(numpy_inc_dir)
    # Ensure the flags Cython-generated code needs are present (idempotent).
    flags = kwargs.pop('flags', [])
    needed_flags = ('-fwrapv', '-pthread')
    if not cplus:
        needed_flags += ('-Wstrict-prototypes',)  # not really needed..
    for flag in needed_flags:
        if flag not in flags:
            flags.append(flag)
    options = kwargs.pop('options', [])
    if kwargs.pop('strict_aliasing', False):
        # Cython requires strict aliasing to be disabled (see error message);
        # src2obj is called with strict_aliasing=False below.
        raise CompilationError("Cython req. strict aliasing to be disabled.")
    if 'pic' not in options:
        options.append('pic')
    if 'warn' not in options:
        options.append('warn')
    # Let's be explicit about standard
    if cplus:
        std = kwargs.pop('std', 'c++98')
    else:
        std = kwargs.pop('std', 'c99')
    return src2obj(
        interm_c_file,
        objpath=objpath,
        cwd=cwd,
        only_update=only_update,
        metadir=metadir,
        include_dirs=include_dirs,
        flags=flags,
        std=std,
        options=options,
        logger=logger,
        inc_py=True,
        strict_aliasing=False,
        **kwargs)
def _any_X(srcs, cls):
    """Return True if any path in `srcs` has an extension mapped to `cls`.

    Parameters
    ----------
    srcs: iterable of path strings
    cls: CompilerRunner subclass
        the compiler-runner class to look for in ``extension_mapping``.

    Returns
    -------
    bool
        True if at least one source's extension maps to `cls`,
        False otherwise (unknown extensions are simply ignored).
    """
    for src in srcs:
        # Only the extension matters (previous version also computed the
        # unused base name via splitext).
        ext = os.path.splitext(src)[1].lower()
        if ext in extension_mapping and extension_mapping[ext][0] == cls:
            return True
    return False
def any_fort(srcs):
    """Return True if any path in `srcs` has a Fortran file extension."""
    return _any_X(srcs, FortranCompilerRunner)
def any_cplus(srcs):
    """Return True if any path in `srcs` has a C++ file extension."""
    return _any_X(srcs, CppCompilerRunner)
def compile_link_import_py_ext(
        srcs, extname=None, build_dir=None, compile_kwargs=None,
        link_kwargs=None, **kwargs):
    """
    Compile and link `srcs` into a Python extension module, then import it.

    When an up-to-date shared object already exists it is imported
    directly; otherwise the sources are compiled and linked first.

    Parameters
    ----------
    srcs: list of path strings
        the source files to build.
    extname: string (optional)
        extension module name; default: basename (sans extension) of the
        last entry in `srcs`.
    build_dir: string (optional)
        directory for object files etc. default: '.'
    compile_kwargs: dict (optional)
        keyword arguments forwarded to compile_sources.
    link_kwargs: dict (optional)
        keyword arguments forwarded to link_py_so.
    **kwargs:
        convenience overrides applied to BOTH compile_kwargs and
        link_kwargs (handy for e.g. logger or only_update).

    Returns
    -------
    The imported extension module.

    Examples
    --------
    >>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
        'fft_wrapper.pyx'], only_update=True)  # doctest: +SKIP
    >>> Aprim = mod.fft(A)  # doctest: +SKIP
    """
    build_dir = build_dir or '.'
    if extname is None:
        last_src = os.path.basename(srcs[-1])
        extname = os.path.splitext(last_src)[0]

    # Shared convenience kwargs are merged into both stages.
    compile_kwargs = compile_kwargs or {}
    compile_kwargs.update(kwargs)
    link_kwargs = link_kwargs or {}
    link_kwargs.update(kwargs)

    try:
        # Fast path: an existing, up-to-date extension can be imported as-is.
        return import_module_from_file(
            os.path.join(build_dir, extname), srcs)
    except ImportError:
        abs_srcs = [get_abspath(p) for p in srcs]
        obj_files = compile_sources(abs_srcs, destdir=build_dir,
                                    cwd=build_dir, **compile_kwargs)
        so_path = link_py_so(
            obj_files, cwd=build_dir, fort=any_fort(srcs),
            cplus=any_cplus(srcs), **link_kwargs)
        return import_module_from_file(so_path)
def compile_link_import_strings(codes, build_dir=None, **kwargs):
    """
    Dumps, compiles, links and imports the provided source code strings.

    Each (name, code) pair is written to `build_dir` (a fresh temporary
    directory when not given) and the resulting files are handed to
    `compile_link_import_py_ext`. An md5 checksum is stored next to each
    file so that unchanged sources are not rewritten — rewriting would
    bump the mtime and defeat ``only_update`` caching.

    Parameters
    ----------
    codes: iterable of name/source pair tuples
    build_dir: string (default: None)
        path to cache_dir. None implies use a temporary directory.
    **kwargs:
        keyword arguments passed onto `compile_link_import_py_ext`.
        ``logger=True`` is a convenience shortcut for a root logger at
        DEBUG level.

    Returns
    -------
    The imported extension module.

    Raises
    ------
    OSError
        if `build_dir` was given but does not exist.
    """
    build_dir = build_dir or tempfile.mkdtemp()
    if not os.path.isdir(build_dir):
        # was: OSError("Non-existent directory: ", build_dir) -- the trailing
        # comma produced a malformed two-element args tuple.
        raise OSError("Non-existent directory: {0}".format(build_dir))
    source_files = []
    if kwargs.get('logger', False) is True:
        import logging
        logging.basicConfig(level=logging.DEBUG)
        kwargs['logger'] = logging.getLogger()
    only_update = kwargs.get('only_update', True)
    for name, code_ in codes:
        dest = os.path.join(build_dir, name)
        differs = True
        md5_in_mem = md5_of_string(code_.encode('utf-8')).hexdigest()
        if only_update and os.path.exists(dest):
            if os.path.exists(dest+'.md5'):
                # Use the cached checksum to avoid re-hashing the file.
                with open(dest+'.md5', 'rt') as fh:
                    md5_on_disk = fh.read()
            else:
                md5_on_disk = md5_of_file(dest).hexdigest()
            differs = md5_on_disk != md5_in_mem
        if not only_update or differs:
            # 'with' blocks fix the previously leaked file handles.
            with open(dest, 'wt') as fh:
                fh.write(code_)
            with open(dest+'.md5', 'wt') as fh:
                fh.write(md5_in_mem)
        source_files.append(dest)
    return compile_link_import_py_ext(
        source_files, build_dir=build_dir, **kwargs)
|
bjodah/pycompilation | pycompilation/compilation.py | link_py_so | python | def link_py_so(obj_files, so_file=None, cwd=None, libraries=None,
cplus=False, fort=False, **kwargs):
libraries = libraries or []
include_dirs = kwargs.pop('include_dirs', [])
library_dirs = kwargs.pop('library_dirs', [])
# from distutils/command/build_ext.py:
if sys.platform == "win32":
warnings.warn("Windows not yet supported.")
elif sys.platform == 'darwin':
# Don't use the default code below
pass
elif sys.platform[:3] == 'aix':
# Don't use the default code below
pass
else:
# LIBDIR/INSTSONAME should always points to libpython (dynamic or static)
pylib = os.path.join(get_config_var('LIBDIR'), get_config_var('INSTSONAME'))
if os.path.exists(pylib):
libraries.append(pylib)
else:
if get_config_var('Py_ENABLE_SHARED'):
ABIFLAGS = get_config_var('ABIFLAGS')
pythonlib = 'python{}.{}{}'.format(
sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
ABIFLAGS or '')
libraries += [pythonlib]
else:
pass
flags = kwargs.pop('flags', [])
needed_flags = ('-pthread',)
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
# We want something like: gcc, ['-pthread', ...
# compilername, flags = cc.split()[0], cc.split()[1:]
# # Grab include_dirs
# include_dirs += list(filter(lambda x: x.startswith('-I'), flags))
# flags = list(filter(lambda x: not x.startswith('-I'), flags))
# # Grab library_dirs
# library_dirs += [x[2:] for x in filter(
# lambda x: x.startswith('-L'), flags)]
# flags = list(filter(lambda x: not x.startswith('-L'), flags))
# flags.extend(kwargs.pop('flags', []))
return link(obj_files, shared=True, flags=flags, cwd=cwd,
cplus=cplus, fort=fort, include_dirs=include_dirs,
libraries=libraries, library_dirs=library_dirs, **kwargs) | Link python extension module (shared object) for importing
Parameters
----------
obj_files: iterable of path strings
object files to be linked
so_file: path string
Name (path) of shared object file to create. If
not specified it will have the basname of the last object
file in `obj_files` but with the extension '.so' (Unix) or
'.dll' (Windows).
cwd: path string
root of relative paths and working directory of linker.
libraries: iterable of strings
libraries to link against, e.g. ['m']
cplus: bool
Any C++ objects? default: False
fort: bool
Any Fortran objects? default: False
kwargs**: dict
keyword arguments passed onto `link(...)`
Returns
-------
    Absolute path to the generated shared object
"def link(obj_files, out_file=None, shared=False, CompilerRunner_=None,\n cwd=None, cplus=False, fort=False, **kwargs):\n \"\"\"\n Link object files.\n\n Parameters\n ----------\n obj_files: iterable of path strings\n out_file: path string (optional)\n path to executable/shared libr... | # -*- coding: utf-8 -*-
"""
Motivation
==========
Distutils does not allow to use object files in compilation
(see http://bugs.python.org/issue5372)
hence the compilation of source files cannot be cached
unless doing something like what compile_sources / src2obj do.
Distutils does not support fortran out of the box (motivation of
numpy distutils), furthermore:
linking mixed C++/Fortran use either Fortran (Intel) or
C++ (GNU) compiler.
"""
from __future__ import print_function, division, absolute_import
import glob
import os
import shutil
import sys
import tempfile
import warnings
from .util import (
MetaReaderWriter, missing_or_other_newer, get_abspath,
expand_collection_in_dict, make_dirs, copy, Glob, ArbitraryDepthGlob,
glob_at_depth, CompilationError, FileNotFoundError,
import_module_from_file, pyx_is_cplus,
md5_of_string, md5_of_file
)
from .runners import (
CCompilerRunner,
CppCompilerRunner,
FortranCompilerRunner
)
from distutils.sysconfig import get_config_var
sharedext = get_config_var('SO')
if os.name == 'posix': # Future improvement to make cross-platform
# flagprefix = '-'
objext = '.o'
elif os.name == 'nt':
# flagprefix = '/' <-- let's assume mingw compilers...
objext = '.obj'
else:
raise ImportError("Unknown os.name: {}".format(os.name))
def get_mixed_fort_c_linker(vendor=None, metadir=None, cplus=False,
                            cwd=None):
    """
    Pick a CompilerRunner subclass (plus extra kwargs) suitable for
    linking mixed Fortran/C(++) object files.

    The vendor is taken from the `vendor` argument, then the
    COMPILER_VENDOR environment variable, then the compilation metadata
    stored in `metadir` — first hit wins.

    Parameters
    ----------
    vendor: string (optional)
        'intel', 'gnu' or 'llvm'.
    metadir: path string (optional)
        directory containing '.metadata_CompilerRunner'. default: '.'
    cplus: bool
        any C++ objects involved? default: False
    cwd: path string (optional)
        root of relative paths.

    Returns
    -------
    (CompilerRunner subclass, dict of extra kwargs, vendor string)

    Raises
    ------
    ValueError
        if no vendor could be determined, or the vendor is unknown.
    """
    vendor = vendor or os.environ.get('COMPILER_VENDOR', None)

    if not vendor:
        metadir = get_abspath(metadir or '.', cwd=cwd)
        reader = MetaReaderWriter('.metadata_CompilerRunner')
        try:
            vendor = reader.get_from_metadata_file(metadir, 'vendor')
        except FileNotFoundError:
            vendor = None

    if not vendor:
        # was: fell through to vendor.lower() and died with AttributeError
        # when vendor stayed None; raise the intended ValueError instead.
        raise ValueError("No vendor found.")
    if vendor.lower() == 'intel':
        if cplus:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main', '-cxxlib']}, vendor)
        else:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main']}, vendor)
    elif vendor.lower() in ('gnu', 'llvm'):
        # BUG FIX: was `vendor.lower() == 'gnu' or 'llvm'`, which is always
        # truthy ('llvm' is a non-empty string), so every non-intel vendor
        # silently took this branch.
        if cplus:
            return (CppCompilerRunner,
                    {'lib_options': ['fortran']}, vendor)
        else:
            return (FortranCompilerRunner,
                    {}, vendor)
    else:
        raise ValueError("No vendor found.")
def compile_sources(files, CompilerRunner_=None,
destdir=None, cwd=None,
keep_dir_struct=False,
per_file_kwargs=None,
**kwargs):
"""
Compile source code files to object files.
Parameters
----------
files: iterable of path strings
source files, if cwd is given, the paths are taken as relative.
CompilerRunner_: CompilerRunner instance (optional)
could be e.g. pycompilation.FortranCompilerRunner
Will be inferred from filename extensions if missing.
destdir: path string
output directory, if cwd is given, the path is taken as relative
cwd: path string
working directory. Specify to have compiler run in other directory.
also used as root of relative paths.
keep_dir_struct: bool
Reproduce directory structure in `destdir`. default: False
per_file_kwargs: dict
dict mapping instances in `files` to keyword arguments
**kwargs: dict
default keyword arguments to pass to CompilerRunner_
"""
_per_file_kwargs = {}
if per_file_kwargs is not None:
for k, v in per_file_kwargs.items():
if isinstance(k, Glob):
for path in glob.glob(k.pathname):
_per_file_kwargs[path] = v
elif isinstance(k, ArbitraryDepthGlob):
for path in glob_at_depth(k.filename, cwd):
_per_file_kwargs[path] = v
else:
_per_file_kwargs[k] = v
# Set up destination directory
destdir = destdir or '.'
if not os.path.isdir(destdir):
if os.path.exists(destdir):
raise IOError("{} is not a directory".format(destdir))
else:
make_dirs(destdir)
if cwd is None:
cwd = '.'
for f in files:
copy(f, destdir, only_update=True, dest_is_dir=True)
# Compile files and return list of paths to the objects
dstpaths = []
for f in files:
if keep_dir_struct:
name, ext = os.path.splitext(f)
else:
name, ext = os.path.splitext(os.path.basename(f))
file_kwargs = kwargs.copy()
file_kwargs.update(_per_file_kwargs.get(f, {}))
dstpaths.append(src2obj(
f, CompilerRunner_, cwd=cwd,
**file_kwargs
))
return dstpaths
def link(obj_files, out_file=None, shared=False, CompilerRunner_=None,
cwd=None, cplus=False, fort=False, **kwargs):
"""
Link object files.
Parameters
----------
obj_files: iterable of path strings
out_file: path string (optional)
path to executable/shared library, if missing
it will be deduced from the last item in obj_files.
shared: bool
Generate a shared library? default: False
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
If not given the `cplus` and `fort` flags will be inspected
(fallback is the C compiler)
cwd: path string
root of relative paths and working directory for compiler
cplus: bool
C++ objects? default: False
fort: bool
Fortran objects? default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_
Returns
-------
    The absolute path to the generated shared object / executable
"""
if out_file is None:
out_file, ext = os.path.splitext(os.path.basename(obj_files[-1]))
if shared:
out_file += sharedext
if not CompilerRunner_:
if fort:
CompilerRunner_, extra_kwargs, vendor = \
get_mixed_fort_c_linker(
vendor=kwargs.get('vendor', None),
metadir=kwargs.get('metadir', None),
cplus=cplus,
cwd=cwd,
)
for k, v in extra_kwargs.items():
expand_collection_in_dict(kwargs, k, v)
else:
if cplus:
CompilerRunner_ = CppCompilerRunner
else:
CompilerRunner_ = CCompilerRunner
flags = kwargs.pop('flags', [])
if shared:
if '-shared' not in flags:
flags.append('-shared')
# mimic GNU linker behavior on OS X when using -shared
# (otherwise likely Undefined symbol errors)
dl_flag = '-undefined dynamic_lookup'
if sys.platform == 'darwin' and dl_flag not in flags:
flags.append(dl_flag)
run_linker = kwargs.pop('run_linker', True)
if not run_linker:
raise ValueError("link(..., run_linker=False)!?")
out_file = get_abspath(out_file, cwd=cwd)
runner = CompilerRunner_(
obj_files, out_file, flags,
cwd=cwd,
**kwargs)
runner.run()
return out_file
def simple_cythonize(src, destdir=None, cwd=None, logger=None,
full_module_name=None, only_update=False,
**cy_kwargs):
"""
Generates a C file from a Cython source file.
Parameters
----------
src: path string
path to Cython source
destdir: path string (optional)
Path to output directory (default: '.')
cwd: path string (optional)
Root of relative paths (default: '.')
logger: logging.Logger
info level used.
full_module_name: string
passed to cy_compile (default: None)
only_update: bool
Only cythonize if source is newer. default: False
**cy_kwargs:
second argument passed to cy_compile.
Generates a .cpp file if cplus=True in cy_kwargs, else a .c file.
"""
from Cython.Compiler.Main import (
default_options, CompilationOptions
)
from Cython.Compiler.Main import compile as cy_compile
assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
cwd = cwd or '.'
destdir = destdir or '.'
ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
c_name = os.path.splitext(os.path.basename(src))[0] + ext
dstfile = os.path.join(destdir, c_name)
if only_update:
if not missing_or_other_newer(dstfile, src, cwd=cwd):
msg = '{0} newer than {1}, did not re-cythonize.'.format(
dstfile, src)
if logger:
logger.info(msg)
else:
print(msg)
return dstfile
if cwd:
ori_dir = os.getcwd()
else:
ori_dir = '.'
os.chdir(cwd)
try:
cy_options = CompilationOptions(default_options)
cy_options.__dict__.update(cy_kwargs)
if logger:
logger.info("Cythonizing {0} to {1}".format(
src, dstfile))
cy_result = cy_compile([src], cy_options, full_module_name=full_module_name)
if cy_result.num_errors > 0:
raise ValueError("Cython compilation failed.")
if os.path.abspath(os.path.dirname(
src)) != os.path.abspath(destdir):
if os.path.exists(dstfile):
os.unlink(dstfile)
shutil.move(os.path.join(os.path.dirname(src), c_name),
destdir)
finally:
os.chdir(ori_dir)
return dstfile
extension_mapping = {
'.c': (CCompilerRunner, None),
'.cpp': (CppCompilerRunner, None),
'.cxx': (CppCompilerRunner, None),
'.f': (FortranCompilerRunner, None),
'.for': (FortranCompilerRunner, None),
'.ftn': (FortranCompilerRunner, None),
'.f90': (FortranCompilerRunner, 'f2008'), # ifort only knows about .f90
'.f95': (FortranCompilerRunner, 'f95'),
'.f03': (FortranCompilerRunner, 'f2003'),
'.f08': (FortranCompilerRunner, 'f2008'),
}
def src2obj(srcpath, CompilerRunner_=None, objpath=None,
only_update=False, cwd=None, out_ext=None, inc_py=False,
**kwargs):
"""
Compiles a source code file to an object file.
Files ending with '.pyx' assumed to be cython files and
are dispatched to pyx2obj.
Parameters
----------
srcpath: path string
path to source file
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
Default: deduced from extension of srcpath
objpath: path string (optional)
        path to generated object. default: deduced from srcpath
only_update: bool
only compile if source is newer than objpath. default: False
cwd: path string (optional)
working directory and root of relative paths. default: current dir.
out_ext: string
set when objpath is a dir and you want to override defaults
('.o'/'.obj' for Unix/Windows).
inc_py: bool
add Python include path to include_dirs. default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_ or pyx2obj
"""
name, ext = os.path.splitext(os.path.basename(srcpath))
if objpath is None:
if os.path.isabs(srcpath):
objpath = '.'
else:
objpath = os.path.dirname(srcpath)
objpath = objpath or '.' # avoid objpath == ''
out_ext = out_ext or objext
if os.path.isdir(objpath):
objpath = os.path.join(objpath, name+out_ext)
include_dirs = kwargs.pop('include_dirs', [])
if inc_py:
from distutils.sysconfig import get_python_inc
py_inc_dir = get_python_inc()
if py_inc_dir not in include_dirs:
include_dirs.append(py_inc_dir)
if ext.lower() == '.pyx':
return pyx2obj(srcpath, objpath=objpath,
include_dirs=include_dirs, cwd=cwd,
only_update=only_update, **kwargs)
if CompilerRunner_ is None:
CompilerRunner_, std = extension_mapping[ext.lower()]
if 'std' not in kwargs:
kwargs['std'] = std
# src2obj implies not running the linker...
run_linker = kwargs.pop('run_linker', False)
if run_linker:
raise CompilationError("src2obj called with run_linker=True")
if only_update:
if not missing_or_other_newer(objpath, srcpath, cwd=cwd):
msg = "Found {0}, did not recompile.".format(objpath)
if kwargs.get('logger', None):
kwargs['logger'].info(msg)
else:
print(msg)
return objpath
runner = CompilerRunner_(
[srcpath], objpath, include_dirs=include_dirs,
run_linker=run_linker, cwd=cwd, **kwargs)
runner.run()
return objpath
def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None,
logger=None, full_module_name=None, only_update=False,
metadir=None, include_numpy=False, include_dirs=None,
cy_kwargs=None, gdb=False, cplus=None, **kwargs):
"""
Convenience function
If cwd is specified, pyxpath and dst are taken to be relative
If only_update is set to `True` the modification time is checked
and compilation is only run if the source is newer than the
destination
Parameters
----------
pyxpath: path string
path to Cython source file
objpath: path string (optional)
path to object file to generate
interm_c_dir: path string (optional)
directory to put generated C file.
cwd: path string (optional)
working directory and root of relative paths
logger: logging.Logger (optional)
passed onto `simple_cythonize` and `src2obj`
full_module_name: string (optional)
passed onto `simple_cythonize`
only_update: bool (optional)
passed onto `simple_cythonize` and `src2obj`
metadir: path string (optional)
passed onto src2obj
include_numpy: bool (optional)
Add numpy include directory to include_dirs. default: False
include_dirs: iterable of path strings (optional)
Passed onto src2obj and via cy_kwargs['include_path']
to simple_cythonize.
cy_kwargs: dict (optional)
keyword arguments passed onto `simple_cythonize`
gdb: bool (optional)
convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
default: False
cplus: bool (optional)
Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
**kwargs: dict
keyword arguments passed onto src2obj
Returns
-------
Absolute path of generated object file.
"""
assert pyxpath.endswith('.pyx')
cwd = cwd or '.'
objpath = objpath or '.'
interm_c_dir = interm_c_dir or os.path.dirname(objpath)
abs_objpath = get_abspath(objpath, cwd=cwd)
if os.path.isdir(abs_objpath):
pyx_fname = os.path.basename(pyxpath)
name, ext = os.path.splitext(pyx_fname)
objpath = os.path.join(objpath, name+objext)
cy_kwargs = cy_kwargs or {}
cy_kwargs['output_dir'] = cwd
if cplus is None:
cplus = pyx_is_cplus(pyxpath)
cy_kwargs['cplus'] = cplus
if gdb:
cy_kwargs['gdb_debug'] = True
if include_dirs:
cy_kwargs['include_path'] = include_dirs
interm_c_file = simple_cythonize(
pyxpath, destdir=interm_c_dir,
cwd=cwd, logger=logger,
full_module_name=full_module_name,
only_update=only_update, **cy_kwargs)
include_dirs = include_dirs or []
if include_numpy:
import numpy
numpy_inc_dir = numpy.get_include()
if numpy_inc_dir not in include_dirs:
include_dirs.append(numpy_inc_dir)
flags = kwargs.pop('flags', [])
needed_flags = ('-fwrapv', '-pthread')
if not cplus:
needed_flags += ('-Wstrict-prototypes',) # not really needed..
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
options = kwargs.pop('options', [])
if kwargs.pop('strict_aliasing', False):
raise CompilationError("Cython req. strict aliasing to be disabled.")
if 'pic' not in options:
options.append('pic')
if 'warn' not in options:
options.append('warn')
# Let's be explicit about standard
if cplus:
std = kwargs.pop('std', 'c++98')
else:
std = kwargs.pop('std', 'c99')
return src2obj(
interm_c_file,
objpath=objpath,
cwd=cwd,
only_update=only_update,
metadir=metadir,
include_dirs=include_dirs,
flags=flags,
std=std,
options=options,
logger=logger,
inc_py=True,
strict_aliasing=False,
**kwargs)
def _any_X(srcs, cls):
for src in srcs:
name, ext = os.path.splitext(src)
key = ext.lower()
if key in extension_mapping:
if extension_mapping[key][0] == cls:
return True
return False
def any_fort(srcs):
return _any_X(srcs, FortranCompilerRunner)
def any_cplus(srcs):
return _any_X(srcs, CppCompilerRunner)
def compile_link_import_py_ext(
srcs, extname=None, build_dir=None, compile_kwargs=None,
link_kwargs=None, **kwargs):
"""
Compiles sources in `srcs` to a shared object (python extension)
which is imported. If shared object is newer than the sources, they
are not recompiled but instead it is imported.
Parameters
----------
srcs: string
list of paths to sources
extname: string
name of extension (default: None)
(taken from the last file in `srcs` - without extension)
build_dir: string
path to directory in which objects files etc. are generated
compile_kwargs: dict
keyword arguments passed to compile_sources
link_kwargs: dict
keyword arguments passed to link_py_so
**kwargs:
additional keyword arguments overwrites to both compile_kwargs
and link_kwargs useful for convenience e.g. when passing logger
Returns
-------
the imported module
Examples
--------
>>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
'fft_wrapper.pyx'], only_update=True) # doctest: +SKIP
>>> Aprim = mod.fft(A) # doctest: +SKIP
"""
build_dir = build_dir or '.'
if extname is None:
extname = os.path.splitext(os.path.basename(srcs[-1]))[0]
compile_kwargs = compile_kwargs or {}
compile_kwargs.update(kwargs)
link_kwargs = link_kwargs or {}
link_kwargs.update(kwargs)
try:
mod = import_module_from_file(os.path.join(build_dir, extname), srcs)
except ImportError:
objs = compile_sources(list(map(get_abspath, srcs)), destdir=build_dir,
cwd=build_dir, **compile_kwargs)
so = link_py_so(
objs, cwd=build_dir, fort=any_fort(srcs), cplus=any_cplus(srcs),
**link_kwargs)
mod = import_module_from_file(so)
return mod
def compile_link_import_strings(codes, build_dir=None, **kwargs):
"""
Creates a temporary directory and dumps, compiles and links
provided source code.
Parameters
----------
codes: iterable of name/source pair tuples
build_dir: string (default: None)
path to cache_dir. None implies use a temporary directory.
**kwargs:
keyword arguments passed onto `compile_link_import_py_ext`
"""
build_dir = build_dir or tempfile.mkdtemp()
if not os.path.isdir(build_dir):
raise OSError("Non-existent directory: ", build_dir)
source_files = []
if kwargs.get('logger', False) is True:
import logging
logging.basicConfig(level=logging.DEBUG)
kwargs['logger'] = logging.getLogger()
only_update = kwargs.get('only_update', True)
for name, code_ in codes:
dest = os.path.join(build_dir, name)
differs = True
md5_in_mem = md5_of_string(code_.encode('utf-8')).hexdigest()
if only_update and os.path.exists(dest):
if os.path.exists(dest+'.md5'):
md5_on_disk = open(dest+'.md5', 'rt').read()
else:
md5_on_disk = md5_of_file(dest).hexdigest()
differs = md5_on_disk != md5_in_mem
if not only_update or differs:
with open(dest, 'wt') as fh:
fh.write(code_)
open(dest+'.md5', 'wt').write(md5_in_mem)
source_files.append(dest)
return compile_link_import_py_ext(
source_files, build_dir=build_dir, **kwargs)
|
bjodah/pycompilation | pycompilation/compilation.py | simple_cythonize | python | def simple_cythonize(src, destdir=None, cwd=None, logger=None,
full_module_name=None, only_update=False,
**cy_kwargs):
from Cython.Compiler.Main import (
default_options, CompilationOptions
)
from Cython.Compiler.Main import compile as cy_compile
assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
cwd = cwd or '.'
destdir = destdir or '.'
ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
c_name = os.path.splitext(os.path.basename(src))[0] + ext
dstfile = os.path.join(destdir, c_name)
if only_update:
if not missing_or_other_newer(dstfile, src, cwd=cwd):
msg = '{0} newer than {1}, did not re-cythonize.'.format(
dstfile, src)
if logger:
logger.info(msg)
else:
print(msg)
return dstfile
if cwd:
ori_dir = os.getcwd()
else:
ori_dir = '.'
os.chdir(cwd)
try:
cy_options = CompilationOptions(default_options)
cy_options.__dict__.update(cy_kwargs)
if logger:
logger.info("Cythonizing {0} to {1}".format(
src, dstfile))
cy_result = cy_compile([src], cy_options, full_module_name=full_module_name)
if cy_result.num_errors > 0:
raise ValueError("Cython compilation failed.")
if os.path.abspath(os.path.dirname(
src)) != os.path.abspath(destdir):
if os.path.exists(dstfile):
os.unlink(dstfile)
shutil.move(os.path.join(os.path.dirname(src), c_name),
destdir)
finally:
os.chdir(ori_dir)
return dstfile | Generates a C file from a Cython source file.
Parameters
----------
src: path string
path to Cython source
destdir: path string (optional)
Path to output directory (default: '.')
cwd: path string (optional)
Root of relative paths (default: '.')
logger: logging.Logger
info level used.
full_module_name: string
passed to cy_compile (default: None)
only_update: bool
Only cythonize if source is newer. default: False
**cy_kwargs:
second argument passed to cy_compile.
Generates a .cpp file if cplus=True in cy_kwargs, else a .c file. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L311-L381 | [
"def missing_or_other_newer(path, other_path, cwd=None):\n \"\"\"\n Investigate if path is non-existant or older than provided reference\n path.\n\n Parameters\n ==========\n path: string\n path to path which might be missing or too old\n other_path: string\n reference path\n c... | # -*- coding: utf-8 -*-
"""
Motivation
==========
Distutils does not allow to use object files in compilation
(see http://bugs.python.org/issue5372)
hence the compilation of source files cannot be cached
unless doing something like what compile_sources / src2obj do.
Distutils does not support fortran out of the box (motivation of
numpy distutils), furthermore:
linking mixed C++/Fortran use either Fortran (Intel) or
C++ (GNU) compiler.
"""
from __future__ import print_function, division, absolute_import
import glob
import os
import shutil
import sys
import tempfile
import warnings
from .util import (
MetaReaderWriter, missing_or_other_newer, get_abspath,
expand_collection_in_dict, make_dirs, copy, Glob, ArbitraryDepthGlob,
glob_at_depth, CompilationError, FileNotFoundError,
import_module_from_file, pyx_is_cplus,
md5_of_string, md5_of_file
)
from .runners import (
CCompilerRunner,
CppCompilerRunner,
FortranCompilerRunner
)
from distutils.sysconfig import get_config_var
sharedext = get_config_var('SO')
if os.name == 'posix': # Future improvement to make cross-platform
# flagprefix = '-'
objext = '.o'
elif os.name == 'nt':
# flagprefix = '/' <-- let's assume mingw compilers...
objext = '.obj'
else:
raise ImportError("Unknown os.name: {}".format(os.name))
def get_mixed_fort_c_linker(vendor=None, metadir=None, cplus=False,
                            cwd=None):
    """
    Determine which CompilerRunner subclass should drive the link step
    when mixing Fortran with C/C++ objects.

    Parameters
    ----------
    vendor: string (optional)
        Compiler vendor, e.g. 'intel', 'gnu' or 'llvm'. When not given,
        the environment variable ``COMPILER_VENDOR`` is consulted, and as
        a last resort the metadata file left behind by a previous run.
    metadir: path string (optional)
        Directory holding a '.metadata_CompilerRunner' file to look the
        vendor up in.
    cplus: bool
        Any C++ objects involved? default: False
    cwd: path string (optional)
        Root of relative paths.

    Returns
    -------
    (CompilerRunner subclass, dict of extra link kwargs, vendor string)

    Raises
    ------
    ValueError
        When no vendor can be determined, or the vendor is unknown.
    """
    vendor = vendor or os.environ.get('COMPILER_VENDOR', None)

    if not vendor:
        metadir = get_abspath(metadir or '.', cwd=cwd)
        reader = MetaReaderWriter('.metadata_CompilerRunner')
        try:
            vendor = reader.get_from_metadata_file(metadir, 'vendor')
        except FileNotFoundError:
            vendor = None

    if not vendor:
        # BUGFIX: previously a missing vendor fell through to
        # ``vendor.lower()`` and crashed with AttributeError on None.
        raise ValueError("No vendor found.")

    if vendor.lower() == 'intel':
        # Intel convention: the Fortran compiler drives mixed links.
        if cplus:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main', '-cxxlib']}, vendor)
        else:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main']}, vendor)
    elif vendor.lower() in ('gnu', 'llvm'):
        # BUGFIX: the original condition read
        # ``vendor.lower() == 'gnu' or 'llvm'`` which is always truthy
        # (the string 'llvm' is truthy on its own), so *any* vendor
        # matched this branch and the final ValueError was unreachable.
        if cplus:
            return (CppCompilerRunner,
                    {'lib_options': ['fortran']}, vendor)
        else:
            return (FortranCompilerRunner,
                    {}, vendor)
    else:
        raise ValueError("No vendor found.")
def compile_sources(files, CompilerRunner_=None,
                    destdir=None, cwd=None,
                    keep_dir_struct=False,
                    per_file_kwargs=None,
                    **kwargs):
    """
    Compile source code files to object files.

    Parameters
    ----------
    files: iterable of path strings
        source files, if cwd is given, the paths are taken as relative.
    CompilerRunner_: CompilerRunner subclass (optional)
        could be e.g. pycompilation.FortranCompilerRunner
        Will be inferred from filename extensions if missing.
    destdir: path string
        output directory, if cwd is given, the path is taken as relative
    cwd: path string
        working directory. Specify to have compiler run in other directory.
        also used as root of relative paths.
    keep_dir_struct: bool
        Reproduce directory structure in `destdir`. default: False
    per_file_kwargs: dict
        dict mapping instances in `files` to keyword arguments
    **kwargs: dict
        default keyword arguments to pass to CompilerRunner_

    Returns
    -------
    List of paths to the generated object files, one per entry in `files`.
    """
    # Expand Glob / ArbitraryDepthGlob keys into concrete file paths so
    # the compile loop below can do a plain dict lookup per file.
    _per_file_kwargs = {}

    if per_file_kwargs is not None:
        for k, v in per_file_kwargs.items():
            if isinstance(k, Glob):
                for path in glob.glob(k.pathname):
                    _per_file_kwargs[path] = v
            elif isinstance(k, ArbitraryDepthGlob):
                for path in glob_at_depth(k.filename, cwd):
                    _per_file_kwargs[path] = v
            else:
                _per_file_kwargs[k] = v

    # Set up destination directory
    destdir = destdir or '.'
    if not os.path.isdir(destdir):
        if os.path.exists(destdir):
            raise IOError("{} is not a directory".format(destdir))
        else:
            make_dirs(destdir)
    if cwd is None:
        cwd = '.'
    # Copy each source into destdir (copy() is called with
    # only_update=True — presumably it skips up-to-date files; see util.copy).
    for f in files:
        copy(f, destdir, only_update=True, dest_is_dir=True)

    # Compile files and return list of paths to the objects
    dstpaths = []
    for f in files:
        if keep_dir_struct:
            name, ext = os.path.splitext(f)
        else:
            name, ext = os.path.splitext(os.path.basename(f))
        # NOTE(review): `name`/`ext` are computed here but never used below.
        # Per-file kwargs override the defaults supplied via **kwargs.
        file_kwargs = kwargs.copy()
        file_kwargs.update(_per_file_kwargs.get(f, {}))
        dstpaths.append(src2obj(
            f, CompilerRunner_, cwd=cwd,
            **file_kwargs
        ))
    return dstpaths
def link(obj_files, out_file=None, shared=False, CompilerRunner_=None,
         cwd=None, cplus=False, fort=False, **kwargs):
    """
    Link object files.

    Parameters
    ----------
    obj_files: iterable of path strings
    out_file: path string (optional)
        path to executable/shared library, if missing
        it will be deduced from the last item in obj_files.
    shared: bool
        Generate a shared library? default: False
    CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
        If not given the `cplus` and `fort` flags will be inspected
        (fallback is the C compiler)
    cwd: path string
        root of relative paths and working directory for compiler
    cplus: bool
        C++ objects? default: False
    fort: bool
        Fortran objects? default: False
    **kwargs: dict
        keyword arguments passed onto CompilerRunner_

    Returns
    -------
    The absolute path to the generated shared object / executable
    """
    # Deduce the output name from the last object file when not given.
    if out_file is None:
        out_file, ext = os.path.splitext(os.path.basename(obj_files[-1]))
        if shared:
            out_file += sharedext

    # Pick the link driver. Mixed Fortran/C(++) links are vendor specific,
    # so defer to get_mixed_fort_c_linker and fold its extra kwargs in.
    if not CompilerRunner_:
        if fort:
            CompilerRunner_, extra_kwargs, vendor = \
                get_mixed_fort_c_linker(
                    vendor=kwargs.get('vendor', None),
                    metadir=kwargs.get('metadir', None),
                    cplus=cplus,
                    cwd=cwd,
                )
            for k, v in extra_kwargs.items():
                expand_collection_in_dict(kwargs, k, v)
        else:
            if cplus:
                CompilerRunner_ = CppCompilerRunner
            else:
                CompilerRunner_ = CCompilerRunner

    flags = kwargs.pop('flags', [])
    if shared:
        if '-shared' not in flags:
            flags.append('-shared')
        # mimic GNU linker behavior on OS X when using -shared
        # (otherwise likely Undefined symbol errors)
        dl_flag = '-undefined dynamic_lookup'
        if sys.platform == 'darwin' and dl_flag not in flags:
            flags.append(dl_flag)

    # link() always runs the linker; passing run_linker=False is a caller
    # error (contrast with src2obj, which never runs it).
    run_linker = kwargs.pop('run_linker', True)
    if not run_linker:
        raise ValueError("link(..., run_linker=False)!?")

    out_file = get_abspath(out_file, cwd=cwd)
    runner = CompilerRunner_(
        obj_files, out_file, flags,
        cwd=cwd,
        **kwargs)
    runner.run()
    return out_file
def link_py_so(obj_files, so_file=None, cwd=None, libraries=None,
               cplus=False, fort=False, **kwargs):
    """
    Link python extension module (shared object) for importing

    Parameters
    ----------
    obj_files: iterable of path strings
        object files to be linked
    so_file: path string
        Name (path) of shared object file to create. If
        not specified it will have the basname of the last object
        file in `obj_files` but with the extension '.so' (Unix) or
        '.dll' (Windows).
    cwd: path string
        root of relative paths and working directory of linker.
    libraries: iterable of strings
        libraries to link against, e.g. ['m']
    cplus: bool
        Any C++ objects? default: False
    fort: bool
        Any Fortran objects? default: False
    **kwargs: dict
        keyword arguments passed onto `link(...)`

    Returns
    -------
    Absolute path to the generate shared object
    """
    libraries = libraries or []

    include_dirs = kwargs.pop('include_dirs', [])
    library_dirs = kwargs.pop('library_dirs', [])
    # Platform-specific libpython handling, adapted
    # from distutils/command/build_ext.py:
    if sys.platform == "win32":
        warnings.warn("Windows not yet supported.")
    elif sys.platform == 'darwin':
        # Don't use the default code below
        pass
    elif sys.platform[:3] == 'aix':
        # Don't use the default code below
        pass
    else:
        # LIBDIR/INSTSONAME should always points to libpython (dynamic or static)
        pylib = os.path.join(get_config_var('LIBDIR'), get_config_var('INSTSONAME'))
        if os.path.exists(pylib):
            libraries.append(pylib)
        else:
            # Fall back to '-lpythonX.Y' style linking when the concrete
            # libpython file is not on disk but a shared build exists.
            if get_config_var('Py_ENABLE_SHARED'):
                ABIFLAGS = get_config_var('ABIFLAGS')
                # sys.hexversion >> 24 is the major version,
                # (sys.hexversion >> 16) & 0xff is the minor version.
                pythonlib = 'python{}.{}{}'.format(
                    sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
                    ABIFLAGS or '')
                libraries += [pythonlib]
            else:
                pass

    flags = kwargs.pop('flags', [])
    needed_flags = ('-pthread',)
    for flag in needed_flags:
        if flag not in flags:
            flags.append(flag)

    # NOTE(review): commented-out exploration kept from upstream (parsing
    # include/library dirs out of the configured compiler command line):
    # We want something like: gcc, ['-pthread', ...
    # compilername, flags = cc.split()[0], cc.split()[1:]
    # # Grab include_dirs
    # include_dirs += list(filter(lambda x: x.startswith('-I'), flags))
    # flags = list(filter(lambda x: not x.startswith('-I'), flags))
    # # Grab library_dirs
    # library_dirs += [x[2:] for x in filter(
    #     lambda x: x.startswith('-L'), flags)]
    # flags = list(filter(lambda x: not x.startswith('-L'), flags))
    # flags.extend(kwargs.pop('flags', []))

    return link(obj_files, shared=True, flags=flags, cwd=cwd,
                cplus=cplus, fort=fort, include_dirs=include_dirs,
                libraries=libraries, library_dirs=library_dirs, **kwargs)
# Maps a (lower-cased) source-file extension to a tuple of
# (CompilerRunner subclass, language-standard string). A standard of
# None means "use the runner's default".
extension_mapping = {
    '.c': (CCompilerRunner, None),
    '.cpp': (CppCompilerRunner, None),
    '.cxx': (CppCompilerRunner, None),
    '.f': (FortranCompilerRunner, None),
    '.for': (FortranCompilerRunner, None),
    '.ftn': (FortranCompilerRunner, None),
    '.f90': (FortranCompilerRunner, 'f2008'),  # ifort only knows about .f90
    '.f95': (FortranCompilerRunner, 'f95'),
    '.f03': (FortranCompilerRunner, 'f2003'),
    '.f08': (FortranCompilerRunner, 'f2008'),
}
def src2obj(srcpath, CompilerRunner_=None, objpath=None,
            only_update=False, cwd=None, out_ext=None, inc_py=False,
            **kwargs):
    """
    Compile a single source file into an object file.

    Files ending in '.pyx' are treated as Cython sources and dispatched
    to `pyx2obj`; every other extension is resolved through
    `extension_mapping`.

    Parameters
    ----------
    srcpath: path string
        path to the source file
    CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
        default: deduced from the extension of `srcpath`
    objpath: path string (optional)
        path of the object file to generate. default: deduced from `srcpath`
    only_update: bool
        only compile when the source is newer than `objpath`. default: False
    cwd: path string (optional)
        working directory and root of relative paths. default: current dir.
    out_ext: string
        overrides the platform default ('.o'/'.obj') when `objpath`
        is a directory
    inc_py: bool
        add the Python include path to include_dirs. default: False
    **kwargs: dict
        forwarded to `CompilerRunner_` (or to `pyx2obj` for Cython input)

    Returns
    -------
    Path of the (possibly pre-existing) object file.
    """
    name, ext = os.path.splitext(os.path.basename(srcpath))

    # Derive a destination for the object file when none was supplied.
    if objpath is None:
        objpath = '.' if os.path.isabs(srcpath) else os.path.dirname(srcpath)
    objpath = objpath or '.'  # guard against objpath == ''
    if os.path.isdir(objpath):
        objpath = os.path.join(objpath, name + (out_ext or objext))

    include_dirs = kwargs.pop('include_dirs', [])
    if inc_py:
        from distutils.sysconfig import get_python_inc
        py_inc_dir = get_python_inc()
        if py_inc_dir not in include_dirs:
            include_dirs.append(py_inc_dir)

    # Cython sources take an entirely different route.
    if ext.lower() == '.pyx':
        return pyx2obj(srcpath, objpath=objpath,
                       include_dirs=include_dirs, cwd=cwd,
                       only_update=only_update, **kwargs)

    if CompilerRunner_ is None:
        CompilerRunner_, std = extension_mapping[ext.lower()]
        kwargs.setdefault('std', std)

    # Producing an object file implies the linker must not run.
    run_linker = kwargs.pop('run_linker', False)
    if run_linker:
        raise CompilationError("src2obj called with run_linker=True")

    if only_update and not missing_or_other_newer(objpath, srcpath, cwd=cwd):
        msg = "Found {0}, did not recompile.".format(objpath)
        if kwargs.get('logger', None):
            kwargs['logger'].info(msg)
        else:
            print(msg)
        return objpath

    runner = CompilerRunner_([srcpath], objpath, include_dirs=include_dirs,
                             run_linker=run_linker, cwd=cwd, **kwargs)
    runner.run()
    return objpath
def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None,
            logger=None, full_module_name=None, only_update=False,
            metadir=None, include_numpy=False, include_dirs=None,
            cy_kwargs=None, gdb=False, cplus=None, **kwargs):
    """
    Convenience function: cythonize a .pyx file and compile the
    resulting C/C++ file to an object file.

    If cwd is specified, pyxpath and dst are taken to be relative
    If only_update is set to `True` the modification time is checked
    and compilation is only run if the source is newer than the
    destination

    Parameters
    ----------
    pyxpath: path string
        path to Cython source file
    objpath: path string (optional)
        path to object file to generate
    interm_c_dir: path string (optional)
        directory to put generated C file.
    cwd: path string (optional)
        working directory and root of relative paths
    logger: logging.Logger (optional)
        passed onto `simple_cythonize` and `src2obj`
    full_module_name: string (optional)
        passed onto `simple_cythonize`
    only_update: bool (optional)
        passed onto `simple_cythonize` and `src2obj`
    metadir: path string (optional)
        passed onto src2obj
    include_numpy: bool (optional)
        Add numpy include directory to include_dirs. default: False
    include_dirs: iterable of path strings (optional)
        Passed onto src2obj and via cy_kwargs['include_path']
        to simple_cythonize.
    cy_kwargs: dict (optional)
        keyword arguments passed onto `simple_cythonize`
    gdb: bool (optional)
        convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
        default: False
    cplus: bool (optional)
        Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
    **kwargs: dict
        keyword arguments passed onto src2obj

    Returns
    -------
    Absolute path of generated object file.
    """
    assert pyxpath.endswith('.pyx')
    cwd = cwd or '.'
    objpath = objpath or '.'
    interm_c_dir = interm_c_dir or os.path.dirname(objpath)

    abs_objpath = get_abspath(objpath, cwd=cwd)

    # When objpath is a directory, name the object after the .pyx file.
    if os.path.isdir(abs_objpath):
        pyx_fname = os.path.basename(pyxpath)
        name, ext = os.path.splitext(pyx_fname)
        objpath = os.path.join(objpath, name+objext)

    # Assemble the options handed to Cython.
    cy_kwargs = cy_kwargs or {}
    cy_kwargs['output_dir'] = cwd
    if cplus is None:
        cplus = pyx_is_cplus(pyxpath)
    cy_kwargs['cplus'] = cplus
    if gdb:
        cy_kwargs['gdb_debug'] = True
    if include_dirs:
        cy_kwargs['include_path'] = include_dirs

    # Stage 1: generate the intermediate C/C++ file.
    interm_c_file = simple_cythonize(
        pyxpath, destdir=interm_c_dir,
        cwd=cwd, logger=logger,
        full_module_name=full_module_name,
        only_update=only_update, **cy_kwargs)

    include_dirs = include_dirs or []
    if include_numpy:
        import numpy
        numpy_inc_dir = numpy.get_include()
        if numpy_inc_dir not in include_dirs:
            include_dirs.append(numpy_inc_dir)

    # Compiler flags required (or conventional) for Cython-generated code.
    flags = kwargs.pop('flags', [])
    needed_flags = ('-fwrapv', '-pthread')
    if not cplus:
        needed_flags += ('-Wstrict-prototypes',)  # not really needed..
    for flag in needed_flags:
        if flag not in flags:
            flags.append(flag)

    options = kwargs.pop('options', [])

    if kwargs.pop('strict_aliasing', False):
        raise CompilationError("Cython req. strict aliasing to be disabled.")

    if 'pic' not in options:
        options.append('pic')
    if 'warn' not in options:
        options.append('warn')

    # Let's be explicit about standard
    if cplus:
        std = kwargs.pop('std', 'c++98')
    else:
        std = kwargs.pop('std', 'c99')

    # Stage 2: compile the generated C/C++ file to an object file.
    return src2obj(
        interm_c_file,
        objpath=objpath,
        cwd=cwd,
        only_update=only_update,
        metadir=metadir,
        include_dirs=include_dirs,
        flags=flags,
        std=std,
        options=options,
        logger=logger,
        inc_py=True,
        strict_aliasing=False,
        **kwargs)
def _any_X(srcs, cls):
    """Return True if any path in *srcs* has a file extension that
    `extension_mapping` associates with runner class *cls*."""
    return any(
        extension_mapping.get(os.path.splitext(src)[1].lower(),
                              (None, None))[0] == cls
        for src in srcs
    )
def any_fort(srcs):
    """Return True if any path in *srcs* maps to the Fortran runner."""
    return _any_X(srcs, FortranCompilerRunner)
def any_cplus(srcs):
    """Return True if any path in *srcs* maps to the C++ runner."""
    return _any_X(srcs, CppCompilerRunner)
def compile_link_import_py_ext(
        srcs, extname=None, build_dir=None, compile_kwargs=None,
        link_kwargs=None, **kwargs):
    """
    Compiles sources in `srcs` to a shared object (python extension)
    which is imported. If shared object is newer than the sources, they
    are not recompiled but instead it is imported.

    Parameters
    ----------
    srcs: string
        list of paths to sources
    extname: string
        name of extension (default: None)
        (taken from the last file in `srcs` - without extension)
    build_dir: string
        path to directory in which objects files etc. are generated
    compile_kwargs: dict
        keyword arguments passed to compile_sources
    link_kwargs: dict
        keyword arguments passed to link_py_so
    **kwargs:
        additional keyword arguments overwrites to both compile_kwargs
        and link_kwargs useful for convenience e.g. when passing logger

    Returns
    -------
    the imported module

    Examples
    --------
    >>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
        'fft_wrapper.pyx'], only_update=True)  # doctest: +SKIP
    >>> Aprim = mod.fft(A)  # doctest: +SKIP
    """
    build_dir = build_dir or '.'
    if extname is None:
        # Convention: the extension is named after the last source file.
        extname = os.path.splitext(os.path.basename(srcs[-1]))[0]

    # **kwargs act as shared overrides for both phases.
    compile_kwargs = compile_kwargs or {}
    compile_kwargs.update(kwargs)

    link_kwargs = link_kwargs or {}
    link_kwargs.update(kwargs)

    # EAFP caching: try importing a previously built extension first.
    # NOTE(review): presumably import_module_from_file raises ImportError
    # when the extension is missing or older than `srcs` — verify against
    # util.import_module_from_file.
    try:
        mod = import_module_from_file(os.path.join(build_dir, extname), srcs)
    except ImportError:
        # Rebuild: compile every source to an object, link them into a
        # shared object, then import that.
        objs = compile_sources(list(map(get_abspath, srcs)), destdir=build_dir,
                               cwd=build_dir, **compile_kwargs)
        so = link_py_so(
            objs, cwd=build_dir, fort=any_fort(srcs), cplus=any_cplus(srcs),
            **link_kwargs)
        mod = import_module_from_file(so)
    return mod
def compile_link_import_strings(codes, build_dir=None, **kwargs):
    """
    Dump, compile, link and import provided source code strings.

    Parameters
    ----------
    codes: iterable of name/source pair tuples
        (filename, source-code-string) pairs written into `build_dir`.
    build_dir: string (default: None)
        path to cache_dir. None implies use a temporary directory
        (note: `tempfile.mkdtemp` does not clean up after itself).
    **kwargs:
        keyword arguments passed onto `compile_link_import_py_ext`

    Returns
    -------
    The imported module.
    """
    build_dir = build_dir or tempfile.mkdtemp()
    if not os.path.isdir(build_dir):
        raise OSError("Non-existent directory: ", build_dir)

    source_files = []
    if kwargs.get('logger', False) is True:
        # Convenience: logger=True sets up a default DEBUG root logger.
        import logging
        logging.basicConfig(level=logging.DEBUG)
        kwargs['logger'] = logging.getLogger()

    only_update = kwargs.get('only_update', True)
    for name, code_ in codes:
        dest = os.path.join(build_dir, name)
        differs = True
        md5_in_mem = md5_of_string(code_.encode('utf-8')).hexdigest()
        if only_update and os.path.exists(dest):
            if os.path.exists(dest+'.md5'):
                # BUGFIX: use context managers so the file handles are
                # closed deterministically (the original left three
                # handles to be reclaimed by garbage collection).
                with open(dest+'.md5', 'rt') as fh:
                    md5_on_disk = fh.read()
            else:
                md5_on_disk = md5_of_file(dest).hexdigest()
            # Rewrite only when the in-memory source really changed.
            differs = md5_on_disk != md5_in_mem
        if not only_update or differs:
            with open(dest, 'wt') as fh:
                fh.write(code_)
            with open(dest+'.md5', 'wt') as fh:
                fh.write(md5_in_mem)
        source_files.append(dest)

    return compile_link_import_py_ext(
        source_files, build_dir=build_dir, **kwargs)
|
bjodah/pycompilation | pycompilation/compilation.py | src2obj | python | def src2obj(srcpath, CompilerRunner_=None, objpath=None,
only_update=False, cwd=None, out_ext=None, inc_py=False,
**kwargs):
name, ext = os.path.splitext(os.path.basename(srcpath))
if objpath is None:
if os.path.isabs(srcpath):
objpath = '.'
else:
objpath = os.path.dirname(srcpath)
objpath = objpath or '.' # avoid objpath == ''
out_ext = out_ext or objext
if os.path.isdir(objpath):
objpath = os.path.join(objpath, name+out_ext)
include_dirs = kwargs.pop('include_dirs', [])
if inc_py:
from distutils.sysconfig import get_python_inc
py_inc_dir = get_python_inc()
if py_inc_dir not in include_dirs:
include_dirs.append(py_inc_dir)
if ext.lower() == '.pyx':
return pyx2obj(srcpath, objpath=objpath,
include_dirs=include_dirs, cwd=cwd,
only_update=only_update, **kwargs)
if CompilerRunner_ is None:
CompilerRunner_, std = extension_mapping[ext.lower()]
if 'std' not in kwargs:
kwargs['std'] = std
# src2obj implies not running the linker...
run_linker = kwargs.pop('run_linker', False)
if run_linker:
raise CompilationError("src2obj called with run_linker=True")
if only_update:
if not missing_or_other_newer(objpath, srcpath, cwd=cwd):
msg = "Found {0}, did not recompile.".format(objpath)
if kwargs.get('logger', None):
kwargs['logger'].info(msg)
else:
print(msg)
return objpath
runner = CompilerRunner_(
[srcpath], objpath, include_dirs=include_dirs,
run_linker=run_linker, cwd=cwd, **kwargs)
runner.run()
return objpath | Compiles a source code file to an object file.
Files ending with '.pyx' assumed to be cython files and
are dispatched to pyx2obj.
Parameters
----------
srcpath: path string
path to source file
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
Default: deduced from extension of srcpath
objpath: path string (optional)
path to generated object. defualt: deduced from srcpath
only_update: bool
only compile if source is newer than objpath. default: False
cwd: path string (optional)
working directory and root of relative paths. default: current dir.
out_ext: string
set when objpath is a dir and you want to override defaults
('.o'/'.obj' for Unix/Windows).
inc_py: bool
add Python include path to include_dirs. default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_ or pyx2obj | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L398-L471 | [
"def missing_or_other_newer(path, other_path, cwd=None):\n \"\"\"\n Investigate if path is non-existant or older than provided reference\n path.\n\n Parameters\n ==========\n path: string\n path to path which might be missing or too old\n other_path: string\n reference path\n c... | # -*- coding: utf-8 -*-
"""
Motivation
==========
Distutils does not allow to use object files in compilation
(see http://bugs.python.org/issue5372)
hence the compilation of source files cannot be cached
unless doing something like what compile_sources / src2obj do.
Distutils does not support fortran out of the box (motivation of
numpy distutils), furthermore:
linking mixed C++/Fortran use either Fortran (Intel) or
C++ (GNU) compiler.
"""
from __future__ import print_function, division, absolute_import
import glob
import os
import shutil
import sys
import tempfile
import warnings
from .util import (
MetaReaderWriter, missing_or_other_newer, get_abspath,
expand_collection_in_dict, make_dirs, copy, Glob, ArbitraryDepthGlob,
glob_at_depth, CompilationError, FileNotFoundError,
import_module_from_file, pyx_is_cplus,
md5_of_string, md5_of_file
)
from .runners import (
CCompilerRunner,
CppCompilerRunner,
FortranCompilerRunner
)
from distutils.sysconfig import get_config_var
sharedext = get_config_var('SO')
if os.name == 'posix': # Future improvement to make cross-platform
# flagprefix = '-'
objext = '.o'
elif os.name == 'nt':
# flagprefix = '/' <-- let's assume mingw compilers...
objext = '.obj'
else:
raise ImportError("Unknown os.name: {}".format(os.name))
def get_mixed_fort_c_linker(vendor=None, metadir=None, cplus=False,
                            cwd=None):
    """
    Determine which CompilerRunner subclass should drive the link step
    when mixing Fortran with C/C++ objects.

    Parameters
    ----------
    vendor: string (optional)
        Compiler vendor, e.g. 'intel', 'gnu' or 'llvm'. When not given,
        the environment variable ``COMPILER_VENDOR`` is consulted, and as
        a last resort the metadata file left behind by a previous run.
    metadir: path string (optional)
        Directory holding a '.metadata_CompilerRunner' file to look the
        vendor up in.
    cplus: bool
        Any C++ objects involved? default: False
    cwd: path string (optional)
        Root of relative paths.

    Returns
    -------
    (CompilerRunner subclass, dict of extra link kwargs, vendor string)

    Raises
    ------
    ValueError
        When no vendor can be determined, or the vendor is unknown.
    """
    vendor = vendor or os.environ.get('COMPILER_VENDOR', None)

    if not vendor:
        metadir = get_abspath(metadir or '.', cwd=cwd)
        reader = MetaReaderWriter('.metadata_CompilerRunner')
        try:
            vendor = reader.get_from_metadata_file(metadir, 'vendor')
        except FileNotFoundError:
            vendor = None

    if not vendor:
        # BUGFIX: previously a missing vendor fell through to
        # ``vendor.lower()`` and crashed with AttributeError on None.
        raise ValueError("No vendor found.")

    if vendor.lower() == 'intel':
        # Intel convention: the Fortran compiler drives mixed links.
        if cplus:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main', '-cxxlib']}, vendor)
        else:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main']}, vendor)
    elif vendor.lower() in ('gnu', 'llvm'):
        # BUGFIX: the original condition read
        # ``vendor.lower() == 'gnu' or 'llvm'`` which is always truthy
        # (the string 'llvm' is truthy on its own), so *any* vendor
        # matched this branch and the final ValueError was unreachable.
        if cplus:
            return (CppCompilerRunner,
                    {'lib_options': ['fortran']}, vendor)
        else:
            return (FortranCompilerRunner,
                    {}, vendor)
    else:
        raise ValueError("No vendor found.")
def compile_sources(files, CompilerRunner_=None,
destdir=None, cwd=None,
keep_dir_struct=False,
per_file_kwargs=None,
**kwargs):
"""
Compile source code files to object files.
Parameters
----------
files: iterable of path strings
source files, if cwd is given, the paths are taken as relative.
CompilerRunner_: CompilerRunner instance (optional)
could be e.g. pycompilation.FortranCompilerRunner
Will be inferred from filename extensions if missing.
destdir: path string
output directory, if cwd is given, the path is taken as relative
cwd: path string
working directory. Specify to have compiler run in other directory.
also used as root of relative paths.
keep_dir_struct: bool
Reproduce directory structure in `destdir`. default: False
per_file_kwargs: dict
dict mapping instances in `files` to keyword arguments
**kwargs: dict
default keyword arguments to pass to CompilerRunner_
"""
_per_file_kwargs = {}
if per_file_kwargs is not None:
for k, v in per_file_kwargs.items():
if isinstance(k, Glob):
for path in glob.glob(k.pathname):
_per_file_kwargs[path] = v
elif isinstance(k, ArbitraryDepthGlob):
for path in glob_at_depth(k.filename, cwd):
_per_file_kwargs[path] = v
else:
_per_file_kwargs[k] = v
# Set up destination directory
destdir = destdir or '.'
if not os.path.isdir(destdir):
if os.path.exists(destdir):
raise IOError("{} is not a directory".format(destdir))
else:
make_dirs(destdir)
if cwd is None:
cwd = '.'
for f in files:
copy(f, destdir, only_update=True, dest_is_dir=True)
# Compile files and return list of paths to the objects
dstpaths = []
for f in files:
if keep_dir_struct:
name, ext = os.path.splitext(f)
else:
name, ext = os.path.splitext(os.path.basename(f))
file_kwargs = kwargs.copy()
file_kwargs.update(_per_file_kwargs.get(f, {}))
dstpaths.append(src2obj(
f, CompilerRunner_, cwd=cwd,
**file_kwargs
))
return dstpaths
def link(obj_files, out_file=None, shared=False, CompilerRunner_=None,
cwd=None, cplus=False, fort=False, **kwargs):
"""
Link object files.
Parameters
----------
obj_files: iterable of path strings
out_file: path string (optional)
path to executable/shared library, if missing
it will be deduced from the last item in obj_files.
shared: bool
Generate a shared library? default: False
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
If not given the `cplus` and `fort` flags will be inspected
(fallback is the C compiler)
cwd: path string
root of relative paths and working directory for compiler
cplus: bool
C++ objects? default: False
fort: bool
Fortran objects? default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_
Returns
-------
The absolute to the generated shared object / executable
"""
if out_file is None:
out_file, ext = os.path.splitext(os.path.basename(obj_files[-1]))
if shared:
out_file += sharedext
if not CompilerRunner_:
if fort:
CompilerRunner_, extra_kwargs, vendor = \
get_mixed_fort_c_linker(
vendor=kwargs.get('vendor', None),
metadir=kwargs.get('metadir', None),
cplus=cplus,
cwd=cwd,
)
for k, v in extra_kwargs.items():
expand_collection_in_dict(kwargs, k, v)
else:
if cplus:
CompilerRunner_ = CppCompilerRunner
else:
CompilerRunner_ = CCompilerRunner
flags = kwargs.pop('flags', [])
if shared:
if '-shared' not in flags:
flags.append('-shared')
# mimic GNU linker behavior on OS X when using -shared
# (otherwise likely Undefined symbol errors)
dl_flag = '-undefined dynamic_lookup'
if sys.platform == 'darwin' and dl_flag not in flags:
flags.append(dl_flag)
run_linker = kwargs.pop('run_linker', True)
if not run_linker:
raise ValueError("link(..., run_linker=False)!?")
out_file = get_abspath(out_file, cwd=cwd)
runner = CompilerRunner_(
obj_files, out_file, flags,
cwd=cwd,
**kwargs)
runner.run()
return out_file
def link_py_so(obj_files, so_file=None, cwd=None, libraries=None,
cplus=False, fort=False, **kwargs):
"""
Link python extension module (shared object) for importing
Parameters
----------
obj_files: iterable of path strings
object files to be linked
so_file: path string
Name (path) of shared object file to create. If
not specified it will have the basname of the last object
file in `obj_files` but with the extension '.so' (Unix) or
'.dll' (Windows).
cwd: path string
root of relative paths and working directory of linker.
libraries: iterable of strings
libraries to link against, e.g. ['m']
cplus: bool
Any C++ objects? default: False
fort: bool
Any Fortran objects? default: False
kwargs**: dict
keyword arguments passed onto `link(...)`
Returns
-------
Absolute path to the generate shared object
"""
libraries = libraries or []
include_dirs = kwargs.pop('include_dirs', [])
library_dirs = kwargs.pop('library_dirs', [])
# from distutils/command/build_ext.py:
if sys.platform == "win32":
warnings.warn("Windows not yet supported.")
elif sys.platform == 'darwin':
# Don't use the default code below
pass
elif sys.platform[:3] == 'aix':
# Don't use the default code below
pass
else:
# LIBDIR/INSTSONAME should always points to libpython (dynamic or static)
pylib = os.path.join(get_config_var('LIBDIR'), get_config_var('INSTSONAME'))
if os.path.exists(pylib):
libraries.append(pylib)
else:
if get_config_var('Py_ENABLE_SHARED'):
ABIFLAGS = get_config_var('ABIFLAGS')
pythonlib = 'python{}.{}{}'.format(
sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
ABIFLAGS or '')
libraries += [pythonlib]
else:
pass
flags = kwargs.pop('flags', [])
needed_flags = ('-pthread',)
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
# We want something like: gcc, ['-pthread', ...
# compilername, flags = cc.split()[0], cc.split()[1:]
# # Grab include_dirs
# include_dirs += list(filter(lambda x: x.startswith('-I'), flags))
# flags = list(filter(lambda x: not x.startswith('-I'), flags))
# # Grab library_dirs
# library_dirs += [x[2:] for x in filter(
# lambda x: x.startswith('-L'), flags)]
# flags = list(filter(lambda x: not x.startswith('-L'), flags))
# flags.extend(kwargs.pop('flags', []))
return link(obj_files, shared=True, flags=flags, cwd=cwd,
cplus=cplus, fort=fort, include_dirs=include_dirs,
libraries=libraries, library_dirs=library_dirs, **kwargs)
def simple_cythonize(src, destdir=None, cwd=None, logger=None,
                     full_module_name=None, only_update=False,
                     **cy_kwargs):
    """
    Generates a C file from a Cython source file.

    Parameters
    ----------
    src: path string
        path to Cython source
    destdir: path string (optional)
        Path to output directory (default: '.')
    cwd: path string (optional)
        Root of relative paths (default: '.')
    logger: logging.Logger
        info level used.
    full_module_name: string
        passed to cy_compile (default: None)
    only_update: bool
        Only cythonize if source is newer. default: False
    **cy_kwargs:
        second argument passed to cy_compile.
        Generates a .cpp file if cplus=True in cy_kwargs, else a .c file.

    Returns
    -------
    Path of the generated C (or C++) file.
    """
    from Cython.Compiler.Main import (
        default_options, CompilationOptions
    )
    from Cython.Compiler.Main import compile as cy_compile

    assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
    cwd = cwd or '.'
    destdir = destdir or '.'

    ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
    c_name = os.path.splitext(os.path.basename(src))[0] + ext

    dstfile = os.path.join(destdir, c_name)

    if only_update:
        if not missing_or_other_newer(dstfile, src, cwd=cwd):
            msg = '{0} newer than {1}, did not re-cythonize.'.format(
                dstfile, src)
            if logger:
                logger.info(msg)
            else:
                print(msg)
            return dstfile

    # BUGFIX/cleanup: ``cwd`` is always truthy here (set to '.' above), so
    # the original ``if cwd: ... else: ori_dir = '.'`` held a dead branch.
    ori_dir = os.getcwd()
    os.chdir(cwd)
    try:
        cy_options = CompilationOptions(default_options)
        cy_options.__dict__.update(cy_kwargs)
        if logger:
            logger.info("Cythonizing {0} to {1}".format(
                src, dstfile))
        cy_result = cy_compile([src], cy_options, full_module_name=full_module_name)
        if cy_result.num_errors > 0:
            raise ValueError("Cython compilation failed.")

        # Cython writes next to the source; move the result into destdir
        # when they differ (replacing any stale copy).
        if os.path.abspath(os.path.dirname(
                src)) != os.path.abspath(destdir):
            if os.path.exists(dstfile):
                os.unlink(dstfile)
            shutil.move(os.path.join(os.path.dirname(src), c_name),
                        destdir)
    finally:
        # Always restore the original working directory.
        os.chdir(ori_dir)
    return dstfile
extension_mapping = {
'.c': (CCompilerRunner, None),
'.cpp': (CppCompilerRunner, None),
'.cxx': (CppCompilerRunner, None),
'.f': (FortranCompilerRunner, None),
'.for': (FortranCompilerRunner, None),
'.ftn': (FortranCompilerRunner, None),
'.f90': (FortranCompilerRunner, 'f2008'), # ifort only knows about .f90
'.f95': (FortranCompilerRunner, 'f95'),
'.f03': (FortranCompilerRunner, 'f2003'),
'.f08': (FortranCompilerRunner, 'f2008'),
}
def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None,
logger=None, full_module_name=None, only_update=False,
metadir=None, include_numpy=False, include_dirs=None,
cy_kwargs=None, gdb=False, cplus=None, **kwargs):
"""
Convenience function
If cwd is specified, pyxpath and dst are taken to be relative
If only_update is set to `True` the modification time is checked
and compilation is only run if the source is newer than the
destination
Parameters
----------
pyxpath: path string
path to Cython source file
objpath: path string (optional)
path to object file to generate
interm_c_dir: path string (optional)
directory to put generated C file.
cwd: path string (optional)
working directory and root of relative paths
logger: logging.Logger (optional)
passed onto `simple_cythonize` and `src2obj`
full_module_name: string (optional)
passed onto `simple_cythonize`
only_update: bool (optional)
passed onto `simple_cythonize` and `src2obj`
metadir: path string (optional)
passed onto src2obj
include_numpy: bool (optional)
Add numpy include directory to include_dirs. default: False
include_dirs: iterable of path strings (optional)
Passed onto src2obj and via cy_kwargs['include_path']
to simple_cythonize.
cy_kwargs: dict (optional)
keyword arguments passed onto `simple_cythonize`
gdb: bool (optional)
convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
default: False
cplus: bool (optional)
Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
**kwargs: dict
keyword arguments passed onto src2obj
Returns
-------
Absolute path of generated object file.
"""
assert pyxpath.endswith('.pyx')
cwd = cwd or '.'
objpath = objpath or '.'
interm_c_dir = interm_c_dir or os.path.dirname(objpath)
abs_objpath = get_abspath(objpath, cwd=cwd)
if os.path.isdir(abs_objpath):
pyx_fname = os.path.basename(pyxpath)
name, ext = os.path.splitext(pyx_fname)
objpath = os.path.join(objpath, name+objext)
cy_kwargs = cy_kwargs or {}
cy_kwargs['output_dir'] = cwd
if cplus is None:
cplus = pyx_is_cplus(pyxpath)
cy_kwargs['cplus'] = cplus
if gdb:
cy_kwargs['gdb_debug'] = True
if include_dirs:
cy_kwargs['include_path'] = include_dirs
interm_c_file = simple_cythonize(
pyxpath, destdir=interm_c_dir,
cwd=cwd, logger=logger,
full_module_name=full_module_name,
only_update=only_update, **cy_kwargs)
include_dirs = include_dirs or []
if include_numpy:
import numpy
numpy_inc_dir = numpy.get_include()
if numpy_inc_dir not in include_dirs:
include_dirs.append(numpy_inc_dir)
flags = kwargs.pop('flags', [])
needed_flags = ('-fwrapv', '-pthread')
if not cplus:
needed_flags += ('-Wstrict-prototypes',) # not really needed..
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
options = kwargs.pop('options', [])
if kwargs.pop('strict_aliasing', False):
raise CompilationError("Cython req. strict aliasing to be disabled.")
if 'pic' not in options:
options.append('pic')
if 'warn' not in options:
options.append('warn')
# Let's be explicit about standard
if cplus:
std = kwargs.pop('std', 'c++98')
else:
std = kwargs.pop('std', 'c99')
return src2obj(
interm_c_file,
objpath=objpath,
cwd=cwd,
only_update=only_update,
metadir=metadir,
include_dirs=include_dirs,
flags=flags,
std=std,
options=options,
logger=logger,
inc_py=True,
strict_aliasing=False,
**kwargs)
def _any_X(srcs, cls):
    """ Return ``True`` if any path in `srcs` maps to compiler class `cls`.

    Parameters
    ----------
    srcs: iterable of path strings
        source file paths whose extensions are inspected.
    cls: CompilerRunner subclass
        runner class to look for in ``extension_mapping``.
    """
    for src in srcs:
        # Only the extension matters here; the basename is irrelevant
        # (the original bound an unused `name` local).
        _, ext = os.path.splitext(src)
        entry = extension_mapping.get(ext.lower())
        if entry is not None and entry[0] == cls:
            return True
    return False
def any_fort(srcs):
    """ Whether any source path in `srcs` has a Fortran extension. """
    is_fortran = _any_X(srcs, FortranCompilerRunner)
    return is_fortran
def any_cplus(srcs):
    """ Whether any source path in `srcs` has a C++ extension. """
    is_cplus = _any_X(srcs, CppCompilerRunner)
    return is_cplus
def compile_link_import_py_ext(
        srcs, extname=None, build_dir=None, compile_kwargs=None,
        link_kwargs=None, **kwargs):
    """
    Compiles sources in `srcs` to a shared object (python extension)
    which is imported. If the shared object is newer than the sources,
    they are not recompiled; instead it is imported directly.

    Parameters
    ----------
    srcs: list of path strings
        paths to sources
    extname: string
        name of extension (default: None)
        (taken from the last file in `srcs` - without extension)
    build_dir: string
        path to directory in which objects files etc. are generated
    compile_kwargs: dict
        keyword arguments passed to compile_sources (not mutated)
    link_kwargs: dict
        keyword arguments passed to link_py_so (not mutated)
    **kwargs:
        additional keyword arguments overwrites to both compile_kwargs
        and link_kwargs useful for convenience e.g. when passing logger

    Returns
    -------
    the imported module

    Examples
    --------
    >>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
        'fft_wrapper.pyx'], only_update=True)  # doctest: +SKIP
    >>> Aprim = mod.fft(A)  # doctest: +SKIP
    """
    build_dir = build_dir or '.'
    if extname is None:
        extname = os.path.splitext(os.path.basename(srcs[-1]))[0]

    # Copy before updating: the original mutated caller-provided dicts
    # in place via `.update(kwargs)`.
    compile_kwargs = dict(compile_kwargs or {})
    compile_kwargs.update(kwargs)

    link_kwargs = dict(link_kwargs or {})
    link_kwargs.update(kwargs)

    try:
        # Fast path: re-import an existing, up-to-date extension.
        mod = import_module_from_file(os.path.join(build_dir, extname), srcs)
    except ImportError:
        # Rebuild: compile each source to an object file, link them into
        # a shared object, then import it.
        objs = compile_sources(list(map(get_abspath, srcs)), destdir=build_dir,
                               cwd=build_dir, **compile_kwargs)
        so = link_py_so(
            objs, cwd=build_dir, fort=any_fort(srcs), cplus=any_cplus(srcs),
            **link_kwargs)
        mod = import_module_from_file(so)
    return mod
def compile_link_import_strings(codes, build_dir=None, **kwargs):
    """
    Creates a temporary directory and dumps, compiles and links
    provided source code.

    Parameters
    ----------
    codes: iterable of name/source pair tuples
    build_dir: string (default: None)
        path to cache_dir. None implies use a temporary directory.
    **kwargs:
        keyword arguments passed onto `compile_link_import_py_ext`
    """
    build_dir = build_dir or tempfile.mkdtemp()
    if not os.path.isdir(build_dir):
        raise OSError("Non-existent directory: ", build_dir)

    source_files = []
    if kwargs.get('logger', False) is True:
        # Convenience: logger=True means "set up a default root logger".
        import logging
        logging.basicConfig(level=logging.DEBUG)
        kwargs['logger'] = logging.getLogger()

    only_update = kwargs.get('only_update', True)
    for name, code_ in codes:
        dest = os.path.join(build_dir, name)
        differs = True
        md5_in_mem = md5_of_string(code_.encode('utf-8')).hexdigest()
        if only_update and os.path.exists(dest):
            if os.path.exists(dest+'.md5'):
                # Compare against the previously recorded checksum...
                with open(dest+'.md5', 'rt') as fh:
                    md5_on_disk = fh.read()
            else:
                # ...or hash the file on disk directly.
                md5_on_disk = md5_of_file(dest).hexdigest()
            differs = md5_on_disk != md5_in_mem
        if not only_update or differs:
            # (Re)write the source and remember its checksum; context
            # managers fix the original's leaked file handles.
            with open(dest, 'wt') as fh:
                fh.write(code_)
            with open(dest+'.md5', 'wt') as fh:
                fh.write(md5_in_mem)

        source_files.append(dest)

    return compile_link_import_py_ext(
        source_files, build_dir=build_dir, **kwargs)
|
bjodah/pycompilation | pycompilation/compilation.py | pyx2obj | python | def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None,
logger=None, full_module_name=None, only_update=False,
metadir=None, include_numpy=False, include_dirs=None,
cy_kwargs=None, gdb=False, cplus=None, **kwargs):
assert pyxpath.endswith('.pyx')
cwd = cwd or '.'
objpath = objpath or '.'
interm_c_dir = interm_c_dir or os.path.dirname(objpath)
abs_objpath = get_abspath(objpath, cwd=cwd)
if os.path.isdir(abs_objpath):
pyx_fname = os.path.basename(pyxpath)
name, ext = os.path.splitext(pyx_fname)
objpath = os.path.join(objpath, name+objext)
cy_kwargs = cy_kwargs or {}
cy_kwargs['output_dir'] = cwd
if cplus is None:
cplus = pyx_is_cplus(pyxpath)
cy_kwargs['cplus'] = cplus
if gdb:
cy_kwargs['gdb_debug'] = True
if include_dirs:
cy_kwargs['include_path'] = include_dirs
interm_c_file = simple_cythonize(
pyxpath, destdir=interm_c_dir,
cwd=cwd, logger=logger,
full_module_name=full_module_name,
only_update=only_update, **cy_kwargs)
include_dirs = include_dirs or []
if include_numpy:
import numpy
numpy_inc_dir = numpy.get_include()
if numpy_inc_dir not in include_dirs:
include_dirs.append(numpy_inc_dir)
flags = kwargs.pop('flags', [])
needed_flags = ('-fwrapv', '-pthread')
if not cplus:
needed_flags += ('-Wstrict-prototypes',) # not really needed..
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
options = kwargs.pop('options', [])
if kwargs.pop('strict_aliasing', False):
raise CompilationError("Cython req. strict aliasing to be disabled.")
if 'pic' not in options:
options.append('pic')
if 'warn' not in options:
options.append('warn')
# Let's be explicit about standard
if cplus:
std = kwargs.pop('std', 'c++98')
else:
std = kwargs.pop('std', 'c99')
return src2obj(
interm_c_file,
objpath=objpath,
cwd=cwd,
only_update=only_update,
metadir=metadir,
include_dirs=include_dirs,
flags=flags,
std=std,
options=options,
logger=logger,
inc_py=True,
strict_aliasing=False,
**kwargs) | Convenience function
If cwd is specified, pyxpath and dst are taken to be relative
If only_update is set to `True` the modification time is checked
and compilation is only run if the source is newer than the
destination
Parameters
----------
pyxpath: path string
path to Cython source file
objpath: path string (optional)
path to object file to generate
interm_c_dir: path string (optional)
directory to put generated C file.
cwd: path string (optional)
working directory and root of relative paths
logger: logging.Logger (optional)
passed onto `simple_cythonize` and `src2obj`
full_module_name: string (optional)
passed onto `simple_cythonize`
only_update: bool (optional)
passed onto `simple_cythonize` and `src2obj`
metadir: path string (optional)
passed onto src2obj
include_numpy: bool (optional)
Add numpy include directory to include_dirs. default: False
include_dirs: iterable of path strings (optional)
Passed onto src2obj and via cy_kwargs['include_path']
to simple_cythonize.
cy_kwargs: dict (optional)
keyword arguments passed onto `simple_cythonize`
gdb: bool (optional)
convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
default: False
cplus: bool (optional)
Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
**kwargs: dict
keyword arguments passed onto src2obj
Returns
-------
Absolute path of generated object file. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L474-L596 | [
"def src2obj(srcpath, CompilerRunner_=None, objpath=None,\n only_update=False, cwd=None, out_ext=None, inc_py=False,\n **kwargs):\n \"\"\"\n Compiles a source code file to an object file.\n Files ending with '.pyx' assumed to be cython files and\n are dispatched to pyx2obj.\n\n ... | # -*- coding: utf-8 -*-
"""
Motivation
==========
Distutils does not allow to use object files in compilation
(see http://bugs.python.org/issue5372)
hence the compilation of source files cannot be cached
unless doing something like what compile_sources / src2obj do.
Distutils does not support fortran out of the box (motivation of
numpy distutils), furthermore:
linking mixed C++/Fortran use either Fortran (Intel) or
C++ (GNU) compiler.
"""
from __future__ import print_function, division, absolute_import
import glob
import os
import shutil
import sys
import tempfile
import warnings
from .util import (
MetaReaderWriter, missing_or_other_newer, get_abspath,
expand_collection_in_dict, make_dirs, copy, Glob, ArbitraryDepthGlob,
glob_at_depth, CompilationError, FileNotFoundError,
import_module_from_file, pyx_is_cplus,
md5_of_string, md5_of_file
)
from .runners import (
CCompilerRunner,
CppCompilerRunner,
FortranCompilerRunner
)
from distutils.sysconfig import get_config_var
# Platform's shared-object suffix (e.g. '.so' on Linux, '.pyd'/'.dll'-style
# value on Windows builds of CPython).
sharedext = get_config_var('SO')

# Choose the object-file extension for this platform; only POSIX and
# Windows (assuming mingw-style toolchains) are recognised.
if os.name == 'posix':  # Future improvement to make cross-platform
    # flagprefix = '-'
    objext = '.o'
elif os.name == 'nt':
    # flagprefix = '/' <-- let's assume mingw compilers...
    objext = '.obj'
else:
    raise ImportError("Unknown os.name: {}".format(os.name))
def get_mixed_fort_c_linker(vendor=None, metadir=None, cplus=False,
                            cwd=None):
    """ Pick a CompilerRunner subclass (plus extra kwargs) suitable for
    linking mixed C/C++/Fortran object files.

    Parameters
    ----------
    vendor: string (optional)
        compiler vendor, e.g. 'intel', 'gnu' or 'llvm'. Falls back on the
        COMPILER_VENDOR environment variable, then on metadata previously
        saved by a CompilerRunner under `metadir`.
    metadir: path string (optional)
        directory holding '.metadata_CompilerRunner' (default: '.')
    cplus: bool (optional)
        whether C++ objects are involved. default: False
    cwd: path string (optional)
        root of relative paths.

    Returns
    -------
    (CompilerRunner subclass, dict of extra keyword arguments, vendor)

    Raises
    ------
    ValueError
        when no vendor could be determined or the vendor is unknown.
    """
    vendor = vendor or os.environ.get('COMPILER_VENDOR', None)

    if not vendor:
        metadir = get_abspath(metadir or '.', cwd=cwd)
        reader = MetaReaderWriter('.metadata_CompilerRunner')
        try:
            vendor = reader.get_from_metadata_file(metadir, 'vendor')
        except FileNotFoundError:
            vendor = None

    if vendor is None:
        # Fail early instead of crashing with AttributeError on
        # vendor.lower() below (original behavior).
        raise ValueError("No vendor found.")
    if vendor.lower() == 'intel':
        if cplus:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main', '-cxxlib']}, vendor)
        else:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main']}, vendor)
    elif vendor.lower() in ('gnu', 'llvm'):
        # NOTE: the original condition `== 'gnu' or 'llvm'` was always
        # truthy; the membership test expresses the evident intent.
        if cplus:
            return (CppCompilerRunner,
                    {'lib_options': ['fortran']}, vendor)
        else:
            return (FortranCompilerRunner,
                    {}, vendor)
    else:
        raise ValueError("No vendor found.")
def compile_sources(files, CompilerRunner_=None,
                    destdir=None, cwd=None,
                    keep_dir_struct=False,
                    per_file_kwargs=None,
                    **kwargs):
    """
    Compile source code files to object files.

    Parameters
    ----------
    files: iterable of path strings
        source files, if cwd is given, the paths are taken as relative.
    CompilerRunner_: CompilerRunner instance (optional)
        could be e.g. pycompilation.FortranCompilerRunner
        Will be inferred from filename extensions if missing.
    destdir: path string
        output directory, if cwd is given, the path is taken as relative
    cwd: path string
        working directory. Specify to have compiler run in other directory.
        also used as root of relative paths.
    keep_dir_struct: bool
        Reproduce directory structure in `destdir`. default: False
        NOTE(review): this flag currently has no effect on the output
        location -- per-file destinations are decided by src2obj. Kept
        for backward compatibility; confirm intent before removing.
    per_file_kwargs: dict
        dict mapping instances in `files` to keyword arguments
    **kwargs: dict
        default keyword arguments to pass to CompilerRunner_
    """
    # Expand Glob / ArbitraryDepthGlob keys into concrete paths.
    _per_file_kwargs = {}

    if per_file_kwargs is not None:
        for k, v in per_file_kwargs.items():
            if isinstance(k, Glob):
                for path in glob.glob(k.pathname):
                    _per_file_kwargs[path] = v
            elif isinstance(k, ArbitraryDepthGlob):
                for path in glob_at_depth(k.filename, cwd):
                    _per_file_kwargs[path] = v
            else:
                _per_file_kwargs[k] = v

    # Set up destination directory
    destdir = destdir or '.'
    if not os.path.isdir(destdir):
        if os.path.exists(destdir):
            raise IOError("{} is not a directory".format(destdir))
        else:
            make_dirs(destdir)
    if cwd is None:
        cwd = '.'
    for f in files:
        copy(f, destdir, only_update=True, dest_is_dir=True)

    # Compile files and return list of paths to the objects
    dstpaths = []
    for f in files:
        # (The original computed -- but never used -- name/ext per file
        # here; that dead code has been removed.)
        file_kwargs = kwargs.copy()
        file_kwargs.update(_per_file_kwargs.get(f, {}))
        dstpaths.append(src2obj(
            f, CompilerRunner_, cwd=cwd,
            **file_kwargs
        ))
    return dstpaths
def link(obj_files, out_file=None, shared=False, CompilerRunner_=None,
         cwd=None, cplus=False, fort=False, **kwargs):
    """
    Link object files.

    Parameters
    ----------
    obj_files: iterable of path strings
    out_file: path string (optional)
        path to executable/shared library, if missing
        it will be deduced from the last item in obj_files.
    shared: bool
        Generate a shared library? default: False
    CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
        If not given the `cplus` and `fort` flags will be inspected
        (fallback is the C compiler)
    cwd: path string
        root of relative paths and working directory for compiler
    cplus: bool
        C++ objects? default: False
    fort: bool
        Fortran objects? default: False
    **kwargs: dict
        keyword arguments passed onto CompilerRunner_

    Returns
    -------
    The absolute path to the generated shared object / executable
    """
    if out_file is None:
        # Derive the output name from the last object file.
        out_file, ext = os.path.splitext(os.path.basename(obj_files[-1]))
        if shared:
            out_file += sharedext

    if not CompilerRunner_:
        if fort:
            # Mixed Fortran/C(++) linking requires a vendor-specific
            # choice of linker driver and extra flags.
            CompilerRunner_, extra_kwargs, vendor = \
                get_mixed_fort_c_linker(
                    vendor=kwargs.get('vendor', None),
                    metadir=kwargs.get('metadir', None),
                    cplus=cplus,
                    cwd=cwd,
                )
            for k, v in extra_kwargs.items():
                expand_collection_in_dict(kwargs, k, v)
        else:
            if cplus:
                CompilerRunner_ = CppCompilerRunner
            else:
                CompilerRunner_ = CCompilerRunner

    flags = kwargs.pop('flags', [])
    if shared:
        if '-shared' not in flags:
            flags.append('-shared')
        # mimic GNU linker behavior on OS X when using -shared
        # (otherwise likely Undefined symbol errors)
        dl_flag = '-undefined dynamic_lookup'
        if sys.platform == 'darwin' and dl_flag not in flags:
            flags.append(dl_flag)
    run_linker = kwargs.pop('run_linker', True)
    if not run_linker:
        # This function exists to run the linker; forbid disabling it.
        raise ValueError("link(..., run_linker=False)!?")

    out_file = get_abspath(out_file, cwd=cwd)
    runner = CompilerRunner_(
        obj_files, out_file, flags,
        cwd=cwd,
        **kwargs)
    runner.run()
    return out_file
def link_py_so(obj_files, so_file=None, cwd=None, libraries=None,
               cplus=False, fort=False, **kwargs):
    """
    Link python extension module (shared object) for importing

    Parameters
    ----------
    obj_files: iterable of path strings
        object files to be linked
    so_file: path string
        Name (path) of shared object file to create. If
        not specified it will have the basname of the last object
        file in `obj_files` but with the extension '.so' (Unix) or
        '.dll' (Windows).
    cwd: path string
        root of relative paths and working directory of linker.
    libraries: iterable of strings
        libraries to link against, e.g. ['m']
    cplus: bool
        Any C++ objects? default: False
    fort: bool
        Any Fortran objects? default: False
    **kwargs: dict
        keyword arguments passed onto `link(...)`

    Returns
    -------
    Absolute path to the generated shared object
    """
    libraries = libraries or []

    include_dirs = kwargs.pop('include_dirs', [])
    library_dirs = kwargs.pop('library_dirs', [])

    # Platform-specific handling of which libpython (if any) to link
    # against -- adapted from distutils/command/build_ext.py:
    if sys.platform == "win32":
        warnings.warn("Windows not yet supported.")
    elif sys.platform == 'darwin':
        # Don't use the default code below
        pass
    elif sys.platform[:3] == 'aix':
        # Don't use the default code below
        pass
    else:
        # LIBDIR/INSTSONAME should always points to libpython (dynamic or static)
        pylib = os.path.join(get_config_var('LIBDIR'), get_config_var('INSTSONAME'))
        if os.path.exists(pylib):
            libraries.append(pylib)
        else:
            if get_config_var('Py_ENABLE_SHARED'):
                # Derive e.g. 'python3.7m' from sys.hexversion:
                # bits 24+ are the major, bits 16-23 the minor version.
                ABIFLAGS = get_config_var('ABIFLAGS')
                pythonlib = 'python{}.{}{}'.format(
                    sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
                    ABIFLAGS or '')
                libraries += [pythonlib]
            else:
                pass

    flags = kwargs.pop('flags', [])
    needed_flags = ('-pthread',)
    for flag in needed_flags:
        if flag not in flags:
            flags.append(flag)

    # We want something like: gcc, ['-pthread', ...
    # compilername, flags = cc.split()[0], cc.split()[1:]

    # # Grab include_dirs
    # include_dirs += list(filter(lambda x: x.startswith('-I'), flags))
    # flags = list(filter(lambda x: not x.startswith('-I'), flags))

    # # Grab library_dirs
    # library_dirs += [x[2:] for x in filter(
    #     lambda x: x.startswith('-L'), flags)]
    # flags = list(filter(lambda x: not x.startswith('-L'), flags))
    # flags.extend(kwargs.pop('flags', []))

    return link(obj_files, shared=True, flags=flags, cwd=cwd,
                cplus=cplus, fort=fort, include_dirs=include_dirs,
                libraries=libraries, library_dirs=library_dirs, **kwargs)
def simple_cythonize(src, destdir=None, cwd=None, logger=None,
                     full_module_name=None, only_update=False,
                     **cy_kwargs):
    """
    Generates a C file from a Cython source file.

    Parameters
    ----------
    src: path string
        path to Cython source
    destdir: path string (optional)
        Path to output directory (default: '.')
    cwd: path string (optional)
        Root of relative paths (default: '.')
    logger: logging.Logger
        info level used.
    full_module_name: string
        passed to cy_compile (default: None)
    only_update: bool
        Only cythonize if source is newer. default: False
    **cy_kwargs:
        second argument passed to cy_compile.
        Generates a .cpp file if cplus=True in cy_kwargs, else a .c file.

    Returns
    -------
    Path of the generated C (or C++) file.
    """
    from Cython.Compiler.Main import (
        default_options, CompilationOptions
    )
    from Cython.Compiler.Main import compile as cy_compile

    assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
    cwd = cwd or '.'
    destdir = destdir or '.'

    ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
    c_name = os.path.splitext(os.path.basename(src))[0] + ext

    dstfile = os.path.join(destdir, c_name)

    if only_update:
        if not missing_or_other_newer(dstfile, src, cwd=cwd):
            msg = '{0} newer than {1}, did not re-cythonize.'.format(
                dstfile, src)
            if logger:
                logger.info(msg)
            else:
                print(msg)
            return dstfile

    # `cwd` is always truthy at this point (defaulted to '.' above), so
    # the original dead `else: ori_dir = '.'` branch has been removed.
    ori_dir = os.getcwd()
    os.chdir(cwd)
    try:
        cy_options = CompilationOptions(default_options)
        cy_options.__dict__.update(cy_kwargs)
        if logger:
            logger.info("Cythonizing {0} to {1}".format(
                src, dstfile))
        cy_result = cy_compile([src], cy_options, full_module_name=full_module_name)
        if cy_result.num_errors > 0:
            raise ValueError("Cython compilation failed.")

        # cy_compile writes next to the source; move the result into
        # destdir when they differ.
        if os.path.abspath(os.path.dirname(
                src)) != os.path.abspath(destdir):
            if os.path.exists(dstfile):
                os.unlink(dstfile)
            shutil.move(os.path.join(os.path.dirname(src), c_name),
                        destdir)
    finally:
        os.chdir(ori_dir)

    return dstfile
# Maps lower-cased file extensions to a (CompilerRunner subclass,
# default language standard) pair; a standard of None means "use the
# runner's default".
extension_mapping = {
    '.c': (CCompilerRunner, None),
    '.cpp': (CppCompilerRunner, None),
    '.cxx': (CppCompilerRunner, None),
    '.f': (FortranCompilerRunner, None),
    '.for': (FortranCompilerRunner, None),
    '.ftn': (FortranCompilerRunner, None),
    '.f90': (FortranCompilerRunner, 'f2008'),  # ifort only knows about .f90
    '.f95': (FortranCompilerRunner, 'f95'),
    '.f03': (FortranCompilerRunner, 'f2003'),
    '.f08': (FortranCompilerRunner, 'f2008'),
}
def src2obj(srcpath, CompilerRunner_=None, objpath=None,
            only_update=False, cwd=None, out_ext=None, inc_py=False,
            **kwargs):
    """
    Compiles a source code file to an object file.
    Files ending with '.pyx' assumed to be cython files and
    are dispatched to pyx2obj.

    Parameters
    ----------
    srcpath: path string
        path to source file
    CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
        Default: deduced from extension of srcpath
    objpath: path string (optional)
        path to generated object. defualt: deduced from srcpath
    only_update: bool
        only compile if source is newer than objpath. default: False
    cwd: path string (optional)
        working directory and root of relative paths. default: current dir.
    out_ext: string
        set when objpath is a dir and you want to override defaults
        ('.o'/'.obj' for Unix/Windows).
    inc_py: bool
        add Python include path to include_dirs. default: False
    **kwargs: dict
        keyword arguments passed onto CompilerRunner_ or pyx2obj

    Returns
    -------
    Path to the generated object file.
    """
    name, ext = os.path.splitext(os.path.basename(srcpath))
    if objpath is None:
        if os.path.isabs(srcpath):
            objpath = '.'
        else:
            objpath = os.path.dirname(srcpath)
    objpath = objpath or '.'  # avoid objpath == ''
    out_ext = out_ext or objext
    if os.path.isdir(objpath):
        # A directory was given: derive the object file name from srcpath.
        objpath = os.path.join(objpath, name+out_ext)

    include_dirs = kwargs.pop('include_dirs', [])
    if inc_py:
        from distutils.sysconfig import get_python_inc
        py_inc_dir = get_python_inc()
        if py_inc_dir not in include_dirs:
            include_dirs.append(py_inc_dir)

    # Cython sources go through pyx2obj (which eventually recurses back
    # here with the generated C/C++ file).
    if ext.lower() == '.pyx':
        return pyx2obj(srcpath, objpath=objpath,
                       include_dirs=include_dirs, cwd=cwd,
                       only_update=only_update, **kwargs)

    if CompilerRunner_ is None:
        CompilerRunner_, std = extension_mapping[ext.lower()]
        if 'std' not in kwargs:
            kwargs['std'] = std

    # src2obj implies not running the linker...
    run_linker = kwargs.pop('run_linker', False)
    if run_linker:
        raise CompilationError("src2obj called with run_linker=True")

    if only_update:
        if not missing_or_other_newer(objpath, srcpath, cwd=cwd):
            msg = "Found {0}, did not recompile.".format(objpath)
            if kwargs.get('logger', None):
                kwargs['logger'].info(msg)
            else:
                print(msg)
            return objpath
    runner = CompilerRunner_(
        [srcpath], objpath, include_dirs=include_dirs,
        run_linker=run_linker, cwd=cwd, **kwargs)
    runner.run()
    return objpath
def _any_X(srcs, cls):
    """ Return ``True`` if any path in `srcs` maps to compiler class `cls`.

    Parameters
    ----------
    srcs: iterable of path strings
        source file paths whose extensions are inspected.
    cls: CompilerRunner subclass
        runner class to look for in ``extension_mapping``.
    """
    for src in srcs:
        # Only the extension matters here; the basename is irrelevant
        # (the original bound an unused `name` local).
        _, ext = os.path.splitext(src)
        entry = extension_mapping.get(ext.lower())
        if entry is not None and entry[0] == cls:
            return True
    return False
def any_fort(srcs):
    """ Whether any source path in `srcs` has a Fortran extension. """
    is_fortran = _any_X(srcs, FortranCompilerRunner)
    return is_fortran
def any_cplus(srcs):
    """ Whether any source path in `srcs` has a C++ extension. """
    is_cplus = _any_X(srcs, CppCompilerRunner)
    return is_cplus
def compile_link_import_py_ext(
        srcs, extname=None, build_dir=None, compile_kwargs=None,
        link_kwargs=None, **kwargs):
    """
    Compiles sources in `srcs` to a shared object (python extension)
    which is imported. If the shared object is newer than the sources,
    they are not recompiled; instead it is imported directly.

    Parameters
    ----------
    srcs: list of path strings
        paths to sources
    extname: string
        name of extension (default: None)
        (taken from the last file in `srcs` - without extension)
    build_dir: string
        path to directory in which objects files etc. are generated
    compile_kwargs: dict
        keyword arguments passed to compile_sources (not mutated)
    link_kwargs: dict
        keyword arguments passed to link_py_so (not mutated)
    **kwargs:
        additional keyword arguments overwrites to both compile_kwargs
        and link_kwargs useful for convenience e.g. when passing logger

    Returns
    -------
    the imported module

    Examples
    --------
    >>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
        'fft_wrapper.pyx'], only_update=True)  # doctest: +SKIP
    >>> Aprim = mod.fft(A)  # doctest: +SKIP
    """
    build_dir = build_dir or '.'
    if extname is None:
        extname = os.path.splitext(os.path.basename(srcs[-1]))[0]

    # Copy before updating: the original mutated caller-provided dicts
    # in place via `.update(kwargs)`.
    compile_kwargs = dict(compile_kwargs or {})
    compile_kwargs.update(kwargs)

    link_kwargs = dict(link_kwargs or {})
    link_kwargs.update(kwargs)

    try:
        # Fast path: re-import an existing, up-to-date extension.
        mod = import_module_from_file(os.path.join(build_dir, extname), srcs)
    except ImportError:
        # Rebuild: compile each source to an object file, link them into
        # a shared object, then import it.
        objs = compile_sources(list(map(get_abspath, srcs)), destdir=build_dir,
                               cwd=build_dir, **compile_kwargs)
        so = link_py_so(
            objs, cwd=build_dir, fort=any_fort(srcs), cplus=any_cplus(srcs),
            **link_kwargs)
        mod = import_module_from_file(so)
    return mod
def compile_link_import_strings(codes, build_dir=None, **kwargs):
    """
    Creates a temporary directory and dumps, compiles and links
    provided source code.

    Parameters
    ----------
    codes: iterable of name/source pair tuples
    build_dir: string (default: None)
        path to cache_dir. None implies use a temporary directory.
    **kwargs:
        keyword arguments passed onto `compile_link_import_py_ext`
    """
    build_dir = build_dir or tempfile.mkdtemp()
    if not os.path.isdir(build_dir):
        raise OSError("Non-existent directory: ", build_dir)

    source_files = []
    if kwargs.get('logger', False) is True:
        # Convenience: logger=True means "set up a default root logger".
        import logging
        logging.basicConfig(level=logging.DEBUG)
        kwargs['logger'] = logging.getLogger()

    only_update = kwargs.get('only_update', True)
    for name, code_ in codes:
        dest = os.path.join(build_dir, name)
        differs = True
        md5_in_mem = md5_of_string(code_.encode('utf-8')).hexdigest()
        if only_update and os.path.exists(dest):
            if os.path.exists(dest+'.md5'):
                # Compare against the previously recorded checksum...
                with open(dest+'.md5', 'rt') as fh:
                    md5_on_disk = fh.read()
            else:
                # ...or hash the file on disk directly.
                md5_on_disk = md5_of_file(dest).hexdigest()
            differs = md5_on_disk != md5_in_mem
        if not only_update or differs:
            # (Re)write the source and remember its checksum; context
            # managers fix the original's leaked file handles.
            with open(dest, 'wt') as fh:
                fh.write(code_)
            with open(dest+'.md5', 'wt') as fh:
                fh.write(md5_in_mem)

        source_files.append(dest)

    return compile_link_import_py_ext(
        source_files, build_dir=build_dir, **kwargs)
|
bjodah/pycompilation | pycompilation/compilation.py | compile_link_import_py_ext | python | def compile_link_import_py_ext(
srcs, extname=None, build_dir=None, compile_kwargs=None,
link_kwargs=None, **kwargs):
build_dir = build_dir or '.'
if extname is None:
extname = os.path.splitext(os.path.basename(srcs[-1]))[0]
compile_kwargs = compile_kwargs or {}
compile_kwargs.update(kwargs)
link_kwargs = link_kwargs or {}
link_kwargs.update(kwargs)
try:
mod = import_module_from_file(os.path.join(build_dir, extname), srcs)
except ImportError:
objs = compile_sources(list(map(get_abspath, srcs)), destdir=build_dir,
cwd=build_dir, **compile_kwargs)
so = link_py_so(
objs, cwd=build_dir, fort=any_fort(srcs), cplus=any_cplus(srcs),
**link_kwargs)
mod = import_module_from_file(so)
return mod | Compiles sources in `srcs` to a shared object (python extension)
which is imported. If shared object is newer than the sources, they
are not recompiled but instead it is imported.
Parameters
----------
srcs: string
list of paths to sources
extname: string
name of extension (default: None)
(taken from the last file in `srcs` - without extension)
build_dir: string
path to directory in which objects files etc. are generated
compile_kwargs: dict
keyword arguments passed to compile_sources
link_kwargs: dict
keyword arguments passed to link_py_so
**kwargs:
additional keyword arguments overwrites to both compile_kwargs
and link_kwargs useful for convenience e.g. when passing logger
Returns
-------
the imported module
Examples
--------
>>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
'fft_wrapper.pyx'], only_update=True) # doctest: +SKIP
>>> Aprim = mod.fft(A) # doctest: +SKIP | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L617-L673 | [
"def compile_sources(files, CompilerRunner_=None,\n destdir=None, cwd=None,\n keep_dir_struct=False,\n per_file_kwargs=None,\n **kwargs):\n \"\"\"\n Compile source code files to object files.\n\n Parameters\n ----------\n fil... | # -*- coding: utf-8 -*-
"""
Motivation
==========
Distutils does not allow to use object files in compilation
(see http://bugs.python.org/issue5372)
hence the compilation of source files cannot be cached
unless doing something like what compile_sources / src2obj do.
Distutils does not support fortran out of the box (motivation of
numpy distutils), furthermore:
linking mixed C++/Fortran use either Fortran (Intel) or
C++ (GNU) compiler.
"""
from __future__ import print_function, division, absolute_import
import glob
import os
import shutil
import sys
import tempfile
import warnings
from .util import (
MetaReaderWriter, missing_or_other_newer, get_abspath,
expand_collection_in_dict, make_dirs, copy, Glob, ArbitraryDepthGlob,
glob_at_depth, CompilationError, FileNotFoundError,
import_module_from_file, pyx_is_cplus,
md5_of_string, md5_of_file
)
from .runners import (
CCompilerRunner,
CppCompilerRunner,
FortranCompilerRunner
)
from distutils.sysconfig import get_config_var
sharedext = get_config_var('SO')
if os.name == 'posix': # Future improvement to make cross-platform
# flagprefix = '-'
objext = '.o'
elif os.name == 'nt':
# flagprefix = '/' <-- let's assume mingw compilers...
objext = '.obj'
else:
raise ImportError("Unknown os.name: {}".format(os.name))
def get_mixed_fort_c_linker(vendor=None, metadir=None, cplus=False,
                            cwd=None):
    """ Pick a CompilerRunner subclass (plus extra kwargs) suitable for
    linking mixed C/C++/Fortran object files.

    Parameters
    ----------
    vendor: string (optional)
        compiler vendor, e.g. 'intel', 'gnu' or 'llvm'. Falls back on the
        COMPILER_VENDOR environment variable, then on metadata previously
        saved by a CompilerRunner under `metadir`.
    metadir: path string (optional)
        directory holding '.metadata_CompilerRunner' (default: '.')
    cplus: bool (optional)
        whether C++ objects are involved. default: False
    cwd: path string (optional)
        root of relative paths.

    Returns
    -------
    (CompilerRunner subclass, dict of extra keyword arguments, vendor)

    Raises
    ------
    ValueError
        when no vendor could be determined or the vendor is unknown.
    """
    vendor = vendor or os.environ.get('COMPILER_VENDOR', None)

    if not vendor:
        metadir = get_abspath(metadir or '.', cwd=cwd)
        reader = MetaReaderWriter('.metadata_CompilerRunner')
        try:
            vendor = reader.get_from_metadata_file(metadir, 'vendor')
        except FileNotFoundError:
            vendor = None

    if vendor is None:
        # Fail early instead of crashing with AttributeError on
        # vendor.lower() below (original behavior).
        raise ValueError("No vendor found.")
    if vendor.lower() == 'intel':
        if cplus:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main', '-cxxlib']}, vendor)
        else:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main']}, vendor)
    elif vendor.lower() in ('gnu', 'llvm'):
        # NOTE: the original condition `== 'gnu' or 'llvm'` was always
        # truthy; the membership test expresses the evident intent.
        if cplus:
            return (CppCompilerRunner,
                    {'lib_options': ['fortran']}, vendor)
        else:
            return (FortranCompilerRunner,
                    {}, vendor)
    else:
        raise ValueError("No vendor found.")
def compile_sources(files, CompilerRunner_=None,
                    destdir=None, cwd=None,
                    keep_dir_struct=False,
                    per_file_kwargs=None,
                    **kwargs):
    """
    Compile source code files to object files.

    Parameters
    ----------
    files: iterable of path strings
        source files, if cwd is given, the paths are taken as relative.
    CompilerRunner_: CompilerRunner instance (optional)
        could be e.g. pycompilation.FortranCompilerRunner
        Will be inferred from filename extensions if missing.
    destdir: path string
        output directory, if cwd is given, the path is taken as relative
    cwd: path string
        working directory. Specify to have compiler run in other directory.
        also used as root of relative paths.
    keep_dir_struct: bool
        Reproduce directory structure in `destdir`. default: False
        NOTE(review): this flag currently has no effect on the output
        location -- per-file destinations are decided by src2obj. Kept
        for backward compatibility; confirm intent before removing.
    per_file_kwargs: dict
        dict mapping instances in `files` to keyword arguments
    **kwargs: dict
        default keyword arguments to pass to CompilerRunner_
    """
    # Expand Glob / ArbitraryDepthGlob keys into concrete paths.
    _per_file_kwargs = {}

    if per_file_kwargs is not None:
        for k, v in per_file_kwargs.items():
            if isinstance(k, Glob):
                for path in glob.glob(k.pathname):
                    _per_file_kwargs[path] = v
            elif isinstance(k, ArbitraryDepthGlob):
                for path in glob_at_depth(k.filename, cwd):
                    _per_file_kwargs[path] = v
            else:
                _per_file_kwargs[k] = v

    # Set up destination directory
    destdir = destdir or '.'
    if not os.path.isdir(destdir):
        if os.path.exists(destdir):
            raise IOError("{} is not a directory".format(destdir))
        else:
            make_dirs(destdir)
    if cwd is None:
        cwd = '.'
    for f in files:
        copy(f, destdir, only_update=True, dest_is_dir=True)

    # Compile files and return list of paths to the objects
    dstpaths = []
    for f in files:
        # (The original computed -- but never used -- name/ext per file
        # here; that dead code has been removed.)
        file_kwargs = kwargs.copy()
        file_kwargs.update(_per_file_kwargs.get(f, {}))
        dstpaths.append(src2obj(
            f, CompilerRunner_, cwd=cwd,
            **file_kwargs
        ))
    return dstpaths
def link(obj_files, out_file=None, shared=False, CompilerRunner_=None,
         cwd=None, cplus=False, fort=False, **kwargs):
    """
    Link object files.

    Parameters
    ----------
    obj_files: iterable of path strings
    out_file: path string (optional)
        path to executable/shared library, if missing
        it will be deduced from the last item in obj_files.
    shared: bool
        Generate a shared library? default: False
    CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
        If not given the `cplus` and `fort` flags will be inspected
        (fallback is the C compiler)
    cwd: path string
        root of relative paths and working directory for compiler
    cplus: bool
        C++ objects? default: False
    fort: bool
        Fortran objects? default: False
    **kwargs: dict
        keyword arguments passed onto CompilerRunner_

    Returns
    -------
    The absolute path to the generated shared object / executable
    """
    if out_file is None:
        # Derive the output name from the last object file.
        out_file, ext = os.path.splitext(os.path.basename(obj_files[-1]))
        if shared:
            out_file += sharedext

    if not CompilerRunner_:
        if fort:
            # Mixed Fortran/C(++) linking requires a vendor-specific
            # choice of linker driver and extra flags.
            CompilerRunner_, extra_kwargs, vendor = \
                get_mixed_fort_c_linker(
                    vendor=kwargs.get('vendor', None),
                    metadir=kwargs.get('metadir', None),
                    cplus=cplus,
                    cwd=cwd,
                )
            for k, v in extra_kwargs.items():
                expand_collection_in_dict(kwargs, k, v)
        else:
            if cplus:
                CompilerRunner_ = CppCompilerRunner
            else:
                CompilerRunner_ = CCompilerRunner

    flags = kwargs.pop('flags', [])
    if shared:
        if '-shared' not in flags:
            flags.append('-shared')
        # mimic GNU linker behavior on OS X when using -shared
        # (otherwise likely Undefined symbol errors)
        dl_flag = '-undefined dynamic_lookup'
        if sys.platform == 'darwin' and dl_flag not in flags:
            flags.append(dl_flag)
    run_linker = kwargs.pop('run_linker', True)
    if not run_linker:
        # This function exists to run the linker; forbid disabling it.
        raise ValueError("link(..., run_linker=False)!?")

    out_file = get_abspath(out_file, cwd=cwd)
    runner = CompilerRunner_(
        obj_files, out_file, flags,
        cwd=cwd,
        **kwargs)
    runner.run()
    return out_file
def link_py_so(obj_files, so_file=None, cwd=None, libraries=None,
               cplus=False, fort=False, **kwargs):
    """
    Link python extension module (shared object) for importing

    Parameters
    ----------
    obj_files: iterable of path strings
        object files to be linked
    so_file: path string
        Name (path) of shared object file to create. If
        not specified it will have the basename of the last object
        file in `obj_files` but with the extension '.so' (Unix) or
        '.dll' (Windows).
    cwd: path string
        root of relative paths and working directory of linker.
    libraries: iterable of strings
        libraries to link against, e.g. ['m']
    cplus: bool
        Any C++ objects? default: False
    fort: bool
        Any Fortran objects? default: False
    **kwargs: dict
        keyword arguments passed onto `link(...)`

    Returns
    -------
    Absolute path to the generated shared object
    """
    libraries = libraries or []
    include_dirs = kwargs.pop('include_dirs', [])
    library_dirs = kwargs.pop('library_dirs', [])

    # Decide whether libpython must be linked in explicitly
    # (logic adapted from distutils/command/build_ext.py):
    if sys.platform == "win32":
        warnings.warn("Windows not yet supported.")
    elif sys.platform == 'darwin':
        # Don't use the default code below
        pass
    elif sys.platform[:3] == 'aix':
        # Don't use the default code below
        pass
    else:
        # LIBDIR/INSTSONAME should always point to libpython (dynamic or static)
        pylib = os.path.join(get_config_var('LIBDIR'), get_config_var('INSTSONAME'))
        if os.path.exists(pylib):
            libraries.append(pylib)
        else:
            if get_config_var('Py_ENABLE_SHARED'):
                ABIFLAGS = get_config_var('ABIFLAGS')
                pythonlib = 'python{}.{}{}'.format(
                    sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
                    ABIFLAGS or '')
                libraries += [pythonlib]
            else:
                pass

    flags = kwargs.pop('flags', [])
    needed_flags = ('-pthread',)
    for flag in needed_flags:
        if flag not in flags:
            flags.append(flag)

    # BUGFIX: `so_file` was accepted (and documented) but silently ignored.
    # Forward it as `out_file`; the default None keeps the old behaviour of
    # deriving the name from the last object file.
    return link(obj_files, out_file=so_file, shared=True, flags=flags, cwd=cwd,
                cplus=cplus, fort=fort, include_dirs=include_dirs,
                libraries=libraries, library_dirs=library_dirs, **kwargs)
def simple_cythonize(src, destdir=None, cwd=None, logger=None,
                     full_module_name=None, only_update=False,
                     **cy_kwargs):
    """
    Generates a C file from a Cython source file.

    Parameters
    ----------
    src: path string
        path to Cython source
    destdir: path string (optional)
        Path to output directory (default: '.')
    cwd: path string (optional)
        Root of relative paths (default: '.')
    logger: logging.Logger
        info level used.
    full_module_name: string
        passed to cy_compile (default: None)
    only_update: bool
        Only cythonize if source is newer. default: False
    **cy_kwargs:
        second argument passed to cy_compile.
        Generates a .cpp file if cplus=True in cy_kwargs, else a .c file.
    """
    # Imported lazily so the module works without Cython installed.
    from Cython.Compiler.Main import (
        default_options, CompilationOptions
    )
    from Cython.Compiler.Main import compile as cy_compile

    assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
    cwd = cwd or '.'
    destdir = destdir or '.'

    ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
    c_name = os.path.splitext(os.path.basename(src))[0] + ext
    dstfile = os.path.join(destdir, c_name)

    if only_update:
        # Skip the (expensive) cythonization when the output is up to date.
        if not missing_or_other_newer(dstfile, src, cwd=cwd):
            msg = '{0} newer than {1}, did not re-cythonize.'.format(
                dstfile, src)
            if logger:
                logger.info(msg)
            else:
                print(msg)
            return dstfile

    # Cython is run with `cwd` as working directory; remember where we were
    # so the finally-clause can restore it.
    # NOTE(review): `cwd` is always truthy here (defaulted to '.'),
    # so the else-branch below looks unreachable — confirm before removing.
    if cwd:
        ori_dir = os.getcwd()
    else:
        ori_dir = '.'
    os.chdir(cwd)
    try:
        cy_options = CompilationOptions(default_options)
        cy_options.__dict__.update(cy_kwargs)
        if logger:
            logger.info("Cythonizing {0} to {1}".format(
                src, dstfile))
        cy_result = cy_compile([src], cy_options, full_module_name=full_module_name)
        if cy_result.num_errors > 0:
            raise ValueError("Cython compilation failed.")
        # Cython writes next to the source; move the result to destdir when
        # they differ (removing a stale copy first).
        if os.path.abspath(os.path.dirname(
                src)) != os.path.abspath(destdir):
            if os.path.exists(dstfile):
                os.unlink(dstfile)
            shutil.move(os.path.join(os.path.dirname(src), c_name),
                        destdir)
    finally:
        os.chdir(ori_dir)
    return dstfile
# Maps a lower-cased source-file extension to a pair of
# (CompilerRunner subclass, language standard). A standard of None means
# "use the runner's default".
extension_mapping = {
    '.c': (CCompilerRunner, None),
    '.cpp': (CppCompilerRunner, None),
    '.cxx': (CppCompilerRunner, None),
    '.f': (FortranCompilerRunner, None),
    '.for': (FortranCompilerRunner, None),
    '.ftn': (FortranCompilerRunner, None),
    '.f90': (FortranCompilerRunner, 'f2008'),  # ifort only knows about .f90
    '.f95': (FortranCompilerRunner, 'f95'),
    '.f03': (FortranCompilerRunner, 'f2003'),
    '.f08': (FortranCompilerRunner, 'f2008'),
}
def src2obj(srcpath, CompilerRunner_=None, objpath=None,
            only_update=False, cwd=None, out_ext=None, inc_py=False,
            **kwargs):
    """
    Compiles a source code file to an object file.

    Files ending with '.pyx' assumed to be cython files and
    are dispatched to pyx2obj.

    Parameters
    ----------
    srcpath: path string
        path to source file
    CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
        Default: deduced from extension of srcpath
    objpath: path string (optional)
        path to generated object. default: deduced from srcpath
    only_update: bool
        only compile if source is newer than objpath. default: False
    cwd: path string (optional)
        working directory and root of relative paths. default: current dir.
    out_ext: string
        set when objpath is a dir and you want to override defaults
        ('.o'/'.obj' for Unix/Windows).
    inc_py: bool
        add Python include path to include_dirs. default: False
    **kwargs: dict
        keyword arguments passed onto CompilerRunner_ or pyx2obj
    """
    name, ext = os.path.splitext(os.path.basename(srcpath))
    if objpath is None:
        if os.path.isabs(srcpath):
            objpath = '.'
        else:
            objpath = os.path.dirname(srcpath)
            objpath = objpath or '.'  # avoid objpath == ''
    out_ext = out_ext or objext
    if os.path.isdir(objpath):
        # A directory was given: place "<name><out_ext>" inside it.
        objpath = os.path.join(objpath, name+out_ext)

    include_dirs = kwargs.pop('include_dirs', [])
    if inc_py:
        # Imported lazily; only needed when building Python extensions.
        from distutils.sysconfig import get_python_inc
        py_inc_dir = get_python_inc()
        if py_inc_dir not in include_dirs:
            include_dirs.append(py_inc_dir)

    if ext.lower() == '.pyx':
        # Cython sources go through pyx2obj (cythonize + compile).
        return pyx2obj(srcpath, objpath=objpath,
                       include_dirs=include_dirs, cwd=cwd,
                       only_update=only_update, **kwargs)

    if CompilerRunner_ is None:
        CompilerRunner_, std = extension_mapping[ext.lower()]
        if 'std' not in kwargs:
            kwargs['std'] = std

    # src2obj implies not running the linker...
    run_linker = kwargs.pop('run_linker', False)
    if run_linker:
        raise CompilationError("src2obj called with run_linker=True")

    if only_update:
        if not missing_or_other_newer(objpath, srcpath, cwd=cwd):
            msg = "Found {0}, did not recompile.".format(objpath)
            if kwargs.get('logger', None):
                kwargs['logger'].info(msg)
            else:
                print(msg)
            return objpath
    runner = CompilerRunner_(
        [srcpath], objpath, include_dirs=include_dirs,
        run_linker=run_linker, cwd=cwd, **kwargs)
    runner.run()
    return objpath
def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None,
            logger=None, full_module_name=None, only_update=False,
            metadir=None, include_numpy=False, include_dirs=None,
            cy_kwargs=None, gdb=False, cplus=None, **kwargs):
    """
    Convenience function

    If cwd is specified, pyxpath and dst are taken to be relative
    If only_update is set to `True` the modification time is checked
    and compilation is only run if the source is newer than the
    destination

    Parameters
    ----------
    pyxpath: path string
        path to Cython source file
    objpath: path string (optional)
        path to object file to generate
    interm_c_dir: path string (optional)
        directory to put generated C file.
    cwd: path string (optional)
        working directory and root of relative paths
    logger: logging.Logger (optional)
        passed onto `simple_cythonize` and `src2obj`
    full_module_name: string (optional)
        passed onto `simple_cythonize`
    only_update: bool (optional)
        passed onto `simple_cythonize` and `src2obj`
    metadir: path string (optional)
        passed onto src2obj
    include_numpy: bool (optional)
        Add numpy include directory to include_dirs. default: False
    include_dirs: iterable of path strings (optional)
        Passed onto src2obj and via cy_kwargs['include_path']
        to simple_cythonize.
    cy_kwargs: dict (optional)
        keyword arguments passed onto `simple_cythonize`
    gdb: bool (optional)
        convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
        default: False
    cplus: bool (optional)
        Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
    **kwargs: dict
        keyword arguments passed onto src2obj

    Returns
    -------
    Absolute path of generated object file.
    """
    assert pyxpath.endswith('.pyx')
    cwd = cwd or '.'
    objpath = objpath or '.'
    interm_c_dir = interm_c_dir or os.path.dirname(objpath)

    abs_objpath = get_abspath(objpath, cwd=cwd)

    if os.path.isdir(abs_objpath):
        # Directory given: name the object after the .pyx file.
        pyx_fname = os.path.basename(pyxpath)
        name, ext = os.path.splitext(pyx_fname)
        objpath = os.path.join(objpath, name+objext)

    cy_kwargs = cy_kwargs or {}
    cy_kwargs['output_dir'] = cwd
    if cplus is None:
        # Auto-detect C++ from the source (e.g. "# distutils: language = c++").
        cplus = pyx_is_cplus(pyxpath)
    cy_kwargs['cplus'] = cplus
    if gdb:
        cy_kwargs['gdb_debug'] = True
    if include_dirs:
        cy_kwargs['include_path'] = include_dirs

    # Step 1: .pyx -> .c/.cpp
    interm_c_file = simple_cythonize(
        pyxpath, destdir=interm_c_dir,
        cwd=cwd, logger=logger,
        full_module_name=full_module_name,
        only_update=only_update, **cy_kwargs)

    include_dirs = include_dirs or []
    if include_numpy:
        import numpy
        numpy_inc_dir = numpy.get_include()
        if numpy_inc_dir not in include_dirs:
            include_dirs.append(numpy_inc_dir)

    flags = kwargs.pop('flags', [])
    needed_flags = ('-fwrapv', '-pthread')
    if not cplus:
        needed_flags += ('-Wstrict-prototypes',)  # not really needed..
    for flag in needed_flags:
        if flag not in flags:
            flags.append(flag)

    options = kwargs.pop('options', [])

    if kwargs.pop('strict_aliasing', False):
        raise CompilationError("Cython req. strict aliasing to be disabled.")

    if 'pic' not in options:
        options.append('pic')
    if 'warn' not in options:
        options.append('warn')

    # Let's be explicit about standard
    if cplus:
        std = kwargs.pop('std', 'c++98')
    else:
        std = kwargs.pop('std', 'c99')

    # Step 2: compile the generated C/C++ file to an object file.
    return src2obj(
        interm_c_file,
        objpath=objpath,
        cwd=cwd,
        only_update=only_update,
        metadir=metadir,
        include_dirs=include_dirs,
        flags=flags,
        std=std,
        options=options,
        logger=logger,
        inc_py=True,
        strict_aliasing=False,
        **kwargs)
def _any_X(srcs, cls):
    """Return True if any path in *srcs* has an extension mapped to *cls*."""
    return any(
        extension_mapping.get(os.path.splitext(path)[1].lower(), (None,))[0] == cls
        for path in srcs
    )
def any_fort(srcs):
    """Return True if any path in *srcs* has a Fortran source extension."""
    return _any_X(srcs, FortranCompilerRunner)
def any_cplus(srcs):
    """Return True if any path in *srcs* has a C++ source extension."""
    return _any_X(srcs, CppCompilerRunner)
def compile_link_import_strings(codes, build_dir=None, **kwargs):
    """
    Creates a temporary directory and dumps, compiles and links
    provided source code.

    Parameters
    ----------
    codes: iterable of name/source pair tuples
    build_dir: string (default: None)
        path to cache_dir. None implies use a temporary directory.
    **kwargs:
        keyword arguments passed onto `compile_link_import_py_ext`
    """
    build_dir = build_dir or tempfile.mkdtemp()
    if not os.path.isdir(build_dir):
        raise OSError("Non-existent directory: ", build_dir)

    source_files = []
    if kwargs.get('logger', False) is True:
        # Convenience: logger=True means "set up a default DEBUG logger".
        import logging
        logging.basicConfig(level=logging.DEBUG)
        kwargs['logger'] = logging.getLogger()

    only_update = kwargs.get('only_update', True)
    for name, code_ in codes:
        dest = os.path.join(build_dir, name)
        differs = True
        md5_in_mem = md5_of_string(code_.encode('utf-8')).hexdigest()
        if only_update and os.path.exists(dest):
            if os.path.exists(dest+'.md5'):
                # BUGFIX: file handles were previously left open
                # (open(...).read() / open(...).write() leak descriptors).
                with open(dest+'.md5', 'rt') as fh:
                    md5_on_disk = fh.read()
            else:
                md5_on_disk = md5_of_file(dest).hexdigest()
            differs = md5_on_disk != md5_in_mem
        if not only_update or differs:
            # (Re)write the source and its checksum side-car file.
            with open(dest, 'wt') as fh:
                fh.write(code_)
            with open(dest+'.md5', 'wt') as fh:
                fh.write(md5_in_mem)
        # All sources are forwarded; unchanged ones are skipped downstream
        # by the only_update machinery.
        source_files.append(dest)

    return compile_link_import_py_ext(
        source_files, build_dir=build_dir, **kwargs)
|
bjodah/pycompilation | pycompilation/compilation.py | compile_link_import_strings | python | def compile_link_import_strings(codes, build_dir=None, **kwargs):
build_dir = build_dir or tempfile.mkdtemp()
if not os.path.isdir(build_dir):
raise OSError("Non-existent directory: ", build_dir)
source_files = []
if kwargs.get('logger', False) is True:
import logging
logging.basicConfig(level=logging.DEBUG)
kwargs['logger'] = logging.getLogger()
only_update = kwargs.get('only_update', True)
for name, code_ in codes:
dest = os.path.join(build_dir, name)
differs = True
md5_in_mem = md5_of_string(code_.encode('utf-8')).hexdigest()
if only_update and os.path.exists(dest):
if os.path.exists(dest+'.md5'):
md5_on_disk = open(dest+'.md5', 'rt').read()
else:
md5_on_disk = md5_of_file(dest).hexdigest()
differs = md5_on_disk != md5_in_mem
if not only_update or differs:
with open(dest, 'wt') as fh:
fh.write(code_)
open(dest+'.md5', 'wt').write(md5_in_mem)
source_files.append(dest)
return compile_link_import_py_ext(
source_files, build_dir=build_dir, **kwargs) | Creates a temporary directory and dumps, compiles and links
provided source code.
Parameters
----------
codes: iterable of name/source pair tuples
build_dir: string (default: None)
path to cache_dir. None implies use a temporary directory.
**kwargs:
keyword arguments passed onto `compile_link_import_py_ext` | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L676-L717 | [
"def compile_link_import_py_ext(\n srcs, extname=None, build_dir=None, compile_kwargs=None,\n link_kwargs=None, **kwargs):\n \"\"\"\n Compiles sources in `srcs` to a shared object (python extension)\n which is imported. If shared object is newer than the sources, they\n are not recompiled ... | # -*- coding: utf-8 -*-
"""
Motivation
==========
Distutils does not allow to use object files in compilation
(see http://bugs.python.org/issue5372)
hence the compilation of source files cannot be cached
unless doing something like what compile_sources / src2obj do.
Distutils does not support fortran out of the box (motivation of
numpy distutils), furthermore:
linking mixed C++/Fortran use either Fortran (Intel) or
C++ (GNU) compiler.
"""
from __future__ import print_function, division, absolute_import
import glob
import os
import shutil
import sys
import tempfile
import warnings
from .util import (
MetaReaderWriter, missing_or_other_newer, get_abspath,
expand_collection_in_dict, make_dirs, copy, Glob, ArbitraryDepthGlob,
glob_at_depth, CompilationError, FileNotFoundError,
import_module_from_file, pyx_is_cplus,
md5_of_string, md5_of_file
)
from .runners import (
CCompilerRunner,
CppCompilerRunner,
FortranCompilerRunner
)
from distutils.sysconfig import get_config_var
# Platform-dependent file extensions for shared objects and object files.
# NOTE(review): the 'SO' config var is deprecated in favour of 'EXT_SUFFIX'
# on modern Pythons — confirm the supported interpreter versions.
sharedext = get_config_var('SO')

if os.name == 'posix':  # Future improvement to make cross-platform
    # flagprefix = '-'
    objext = '.o'
elif os.name == 'nt':
    # flagprefix = '/' <-- let's assume mingw compilers...
    objext = '.obj'
else:
    raise ImportError("Unknown os.name: {}".format(os.name))
def get_mixed_fort_c_linker(vendor=None, metadir=None, cplus=False,
                            cwd=None):
    """
    Pick a CompilerRunner class suitable for linking mixed Fortran/C(++)
    objects, together with extra keyword arguments for it.

    Parameters
    ----------
    vendor: string (optional)
        Compiler vendor ('intel', 'gnu' or 'llvm'). Falls back to the
        COMPILER_VENDOR environment variable, then to the metadata file
        in `metadir`.
    metadir: path string (optional)
        Directory holding the '.metadata_CompilerRunner' file.
    cplus: bool
        Whether C++ objects are involved. default: False
    cwd: path string (optional)
        Root of relative paths.

    Returns
    -------
    (CompilerRunner subclass, dict of extra kwargs, vendor string)

    Raises
    ------
    ValueError
        If no vendor could be determined, or the vendor is unknown.
    """
    vendor = vendor or os.environ.get('COMPILER_VENDOR', None)

    if not vendor:
        metadir = get_abspath(metadir or '.', cwd=cwd)
        reader = MetaReaderWriter('.metadata_CompilerRunner')
        try:
            vendor = reader.get_from_metadata_file(metadir, 'vendor')
        except FileNotFoundError:
            vendor = None

    # BUGFIX: previously a still-None vendor fell through to vendor.lower()
    # and raised AttributeError instead of the intended ValueError.
    if not vendor:
        raise ValueError("No vendor found.")

    if vendor.lower() == 'intel':
        if cplus:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main', '-cxxlib']}, vendor)
        else:
            return (FortranCompilerRunner,
                    {'flags': ['-nofor_main']}, vendor)
    elif vendor.lower() in ('gnu', 'llvm'):
        # BUGFIX: was `vendor.lower() == 'gnu' or 'llvm'`, which is always
        # truthy ('llvm' is a non-empty string) and therefore matched every
        # vendor, making the else-branch unreachable.
        if cplus:
            return (CppCompilerRunner,
                    {'lib_options': ['fortran']}, vendor)
        else:
            return (FortranCompilerRunner,
                    {}, vendor)
    else:
        raise ValueError("No vendor found.")
def compile_sources(files, CompilerRunner_=None,
                    destdir=None, cwd=None,
                    keep_dir_struct=False,
                    per_file_kwargs=None,
                    **kwargs):
    """
    Compile source code files to object files.

    Parameters
    ----------
    files: iterable of path strings
        source files, if cwd is given, the paths are taken as relative.
    CompilerRunner_: CompilerRunner instance (optional)
        could be e.g. pycompilation.FortranCompilerRunner
        Will be inferred from filename extensions if missing.
    destdir: path string
        output directory, if cwd is given, the path is taken as relative
    cwd: path string
        working directory. Specify to have compiler run in other directory.
        also used as root of relative paths.
    keep_dir_struct: bool
        Reproduce directory structure in `destdir`. default: False
    per_file_kwargs: dict
        dict mapping instances in `files` to keyword arguments
    **kwargs: dict
        default keyword arguments to pass to CompilerRunner_
    """
    # Expand Glob/ArbitraryDepthGlob keys into concrete per-file kwargs.
    _per_file_kwargs = {}
    if per_file_kwargs is not None:
        for k, v in per_file_kwargs.items():
            if isinstance(k, Glob):
                for path in glob.glob(k.pathname):
                    _per_file_kwargs[path] = v
            elif isinstance(k, ArbitraryDepthGlob):
                for path in glob_at_depth(k.filename, cwd):
                    _per_file_kwargs[path] = v
            else:
                _per_file_kwargs[k] = v

    # Set up destination directory
    destdir = destdir or '.'
    if not os.path.isdir(destdir):
        if os.path.exists(destdir):
            raise IOError("{} is not a directory".format(destdir))
        else:
            make_dirs(destdir)
    if cwd is None:
        cwd = '.'
        for f in files:
            copy(f, destdir, only_update=True, dest_is_dir=True)

    # Compile files and return list of paths to the objects
    dstpaths = []
    for f in files:
        # NOTE(review): name/ext are computed but unused below — presumably
        # left over from an earlier objpath scheme; confirm before removing.
        if keep_dir_struct:
            name, ext = os.path.splitext(f)
        else:
            name, ext = os.path.splitext(os.path.basename(f))

        file_kwargs = kwargs.copy()
        file_kwargs.update(_per_file_kwargs.get(f, {}))
        dstpaths.append(src2obj(
            f, CompilerRunner_, cwd=cwd,
            **file_kwargs
        ))
    return dstpaths
def link(obj_files, out_file=None, shared=False, CompilerRunner_=None,
         cwd=None, cplus=False, fort=False, **kwargs):
    """
    Link object files.

    Parameters
    ----------
    obj_files: iterable of path strings
    out_file: path string (optional)
        path to executable/shared library, if missing
        it will be deduced from the last item in obj_files.
    shared: bool
        Generate a shared library? default: False
    CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
        If not given the `cplus` and `fort` flags will be inspected
        (fallback is the C compiler)
    cwd: path string
        root of relative paths and working directory for compiler
    cplus: bool
        C++ objects? default: False
    fort: bool
        Fortran objects? default: False
    **kwargs: dict
        keyword arguments passed onto CompilerRunner_

    Returns
    -------
    The absolute path to the generated shared object / executable
    """
    if out_file is None:
        # Derive the output name from the last object file in the list.
        out_file, ext = os.path.splitext(os.path.basename(obj_files[-1]))
        if shared:
            out_file += sharedext
    if not CompilerRunner_:
        if fort:
            # Mixed Fortran/C(++) objects: the linker driver depends on the
            # compiler vendor; extra kwargs are merged into ours.
            CompilerRunner_, extra_kwargs, vendor = \
                get_mixed_fort_c_linker(
                    vendor=kwargs.get('vendor', None),
                    metadir=kwargs.get('metadir', None),
                    cplus=cplus,
                    cwd=cwd,
                )
            for k, v in extra_kwargs.items():
                expand_collection_in_dict(kwargs, k, v)
        else:
            if cplus:
                CompilerRunner_ = CppCompilerRunner
            else:
                CompilerRunner_ = CCompilerRunner
    # `flags` is popped so it is not passed twice to CompilerRunner_.
    flags = kwargs.pop('flags', [])
    if shared:
        if '-shared' not in flags:
            flags.append('-shared')
        # mimic GNU linker behavior on OS X when using -shared
        # (otherwise likely Undefined symbol errors)
        dl_flag = '-undefined dynamic_lookup'
        if sys.platform == 'darwin' and dl_flag not in flags:
            flags.append(dl_flag)
    # This function always runs the linker; reject contradictory input.
    run_linker = kwargs.pop('run_linker', True)
    if not run_linker:
        raise ValueError("link(..., run_linker=False)!?")
    out_file = get_abspath(out_file, cwd=cwd)
    runner = CompilerRunner_(
        obj_files, out_file, flags,
        cwd=cwd,
        **kwargs)
    runner.run()
    return out_file
def link_py_so(obj_files, so_file=None, cwd=None, libraries=None,
               cplus=False, fort=False, **kwargs):
    """
    Link python extension module (shared object) for importing

    Parameters
    ----------
    obj_files: iterable of path strings
        object files to be linked
    so_file: path string
        Name (path) of shared object file to create. If
        not specified it will have the basename of the last object
        file in `obj_files` but with the extension '.so' (Unix) or
        '.dll' (Windows).
    cwd: path string
        root of relative paths and working directory of linker.
    libraries: iterable of strings
        libraries to link against, e.g. ['m']
    cplus: bool
        Any C++ objects? default: False
    fort: bool
        Any Fortran objects? default: False
    **kwargs: dict
        keyword arguments passed onto `link(...)`

    Returns
    -------
    Absolute path to the generated shared object
    """
    libraries = libraries or []
    include_dirs = kwargs.pop('include_dirs', [])
    library_dirs = kwargs.pop('library_dirs', [])

    # Decide whether libpython must be linked in explicitly
    # (logic adapted from distutils/command/build_ext.py):
    if sys.platform == "win32":
        warnings.warn("Windows not yet supported.")
    elif sys.platform == 'darwin':
        # Don't use the default code below
        pass
    elif sys.platform[:3] == 'aix':
        # Don't use the default code below
        pass
    else:
        # LIBDIR/INSTSONAME should always point to libpython (dynamic or static)
        pylib = os.path.join(get_config_var('LIBDIR'), get_config_var('INSTSONAME'))
        if os.path.exists(pylib):
            libraries.append(pylib)
        else:
            if get_config_var('Py_ENABLE_SHARED'):
                ABIFLAGS = get_config_var('ABIFLAGS')
                pythonlib = 'python{}.{}{}'.format(
                    sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
                    ABIFLAGS or '')
                libraries += [pythonlib]
            else:
                pass

    flags = kwargs.pop('flags', [])
    needed_flags = ('-pthread',)
    for flag in needed_flags:
        if flag not in flags:
            flags.append(flag)

    # BUGFIX: `so_file` was accepted (and documented) but silently ignored.
    # Forward it as `out_file`; the default None keeps the old behaviour of
    # deriving the name from the last object file.
    return link(obj_files, out_file=so_file, shared=True, flags=flags, cwd=cwd,
                cplus=cplus, fort=fort, include_dirs=include_dirs,
                libraries=libraries, library_dirs=library_dirs, **kwargs)
def simple_cythonize(src, destdir=None, cwd=None, logger=None,
                     full_module_name=None, only_update=False,
                     **cy_kwargs):
    """
    Generates a C file from a Cython source file.

    Parameters
    ----------
    src: path string
        path to Cython source
    destdir: path string (optional)
        Path to output directory (default: '.')
    cwd: path string (optional)
        Root of relative paths (default: '.')
    logger: logging.Logger
        info level used.
    full_module_name: string
        passed to cy_compile (default: None)
    only_update: bool
        Only cythonize if source is newer. default: False
    **cy_kwargs:
        second argument passed to cy_compile.
        Generates a .cpp file if cplus=True in cy_kwargs, else a .c file.
    """
    # Imported lazily so the module works without Cython installed.
    from Cython.Compiler.Main import (
        default_options, CompilationOptions
    )
    from Cython.Compiler.Main import compile as cy_compile

    assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
    cwd = cwd or '.'
    destdir = destdir or '.'

    ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
    c_name = os.path.splitext(os.path.basename(src))[0] + ext
    dstfile = os.path.join(destdir, c_name)

    if only_update:
        # Skip the (expensive) cythonization when the output is up to date.
        if not missing_or_other_newer(dstfile, src, cwd=cwd):
            msg = '{0} newer than {1}, did not re-cythonize.'.format(
                dstfile, src)
            if logger:
                logger.info(msg)
            else:
                print(msg)
            return dstfile

    # Cython is run with `cwd` as working directory; remember where we were
    # so the finally-clause can restore it.
    # NOTE(review): `cwd` is always truthy here (defaulted to '.'),
    # so the else-branch below looks unreachable — confirm before removing.
    if cwd:
        ori_dir = os.getcwd()
    else:
        ori_dir = '.'
    os.chdir(cwd)
    try:
        cy_options = CompilationOptions(default_options)
        cy_options.__dict__.update(cy_kwargs)
        if logger:
            logger.info("Cythonizing {0} to {1}".format(
                src, dstfile))
        cy_result = cy_compile([src], cy_options, full_module_name=full_module_name)
        if cy_result.num_errors > 0:
            raise ValueError("Cython compilation failed.")
        # Cython writes next to the source; move the result to destdir when
        # they differ (removing a stale copy first).
        if os.path.abspath(os.path.dirname(
                src)) != os.path.abspath(destdir):
            if os.path.exists(dstfile):
                os.unlink(dstfile)
            shutil.move(os.path.join(os.path.dirname(src), c_name),
                        destdir)
    finally:
        os.chdir(ori_dir)
    return dstfile
# Maps a lower-cased source-file extension to a pair of
# (CompilerRunner subclass, language standard). A standard of None means
# "use the runner's default".
extension_mapping = {
    '.c': (CCompilerRunner, None),
    '.cpp': (CppCompilerRunner, None),
    '.cxx': (CppCompilerRunner, None),
    '.f': (FortranCompilerRunner, None),
    '.for': (FortranCompilerRunner, None),
    '.ftn': (FortranCompilerRunner, None),
    '.f90': (FortranCompilerRunner, 'f2008'),  # ifort only knows about .f90
    '.f95': (FortranCompilerRunner, 'f95'),
    '.f03': (FortranCompilerRunner, 'f2003'),
    '.f08': (FortranCompilerRunner, 'f2008'),
}
def src2obj(srcpath, CompilerRunner_=None, objpath=None,
            only_update=False, cwd=None, out_ext=None, inc_py=False,
            **kwargs):
    """
    Compiles a source code file to an object file.

    Files ending with '.pyx' assumed to be cython files and
    are dispatched to pyx2obj.

    Parameters
    ----------
    srcpath: path string
        path to source file
    CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
        Default: deduced from extension of srcpath
    objpath: path string (optional)
        path to generated object. default: deduced from srcpath
    only_update: bool
        only compile if source is newer than objpath. default: False
    cwd: path string (optional)
        working directory and root of relative paths. default: current dir.
    out_ext: string
        set when objpath is a dir and you want to override defaults
        ('.o'/'.obj' for Unix/Windows).
    inc_py: bool
        add Python include path to include_dirs. default: False
    **kwargs: dict
        keyword arguments passed onto CompilerRunner_ or pyx2obj
    """
    name, ext = os.path.splitext(os.path.basename(srcpath))
    if objpath is None:
        if os.path.isabs(srcpath):
            objpath = '.'
        else:
            objpath = os.path.dirname(srcpath)
            objpath = objpath or '.'  # avoid objpath == ''
    out_ext = out_ext or objext
    if os.path.isdir(objpath):
        # A directory was given: place "<name><out_ext>" inside it.
        objpath = os.path.join(objpath, name+out_ext)

    include_dirs = kwargs.pop('include_dirs', [])
    if inc_py:
        # Imported lazily; only needed when building Python extensions.
        from distutils.sysconfig import get_python_inc
        py_inc_dir = get_python_inc()
        if py_inc_dir not in include_dirs:
            include_dirs.append(py_inc_dir)

    if ext.lower() == '.pyx':
        # Cython sources go through pyx2obj (cythonize + compile).
        return pyx2obj(srcpath, objpath=objpath,
                       include_dirs=include_dirs, cwd=cwd,
                       only_update=only_update, **kwargs)

    if CompilerRunner_ is None:
        CompilerRunner_, std = extension_mapping[ext.lower()]
        if 'std' not in kwargs:
            kwargs['std'] = std

    # src2obj implies not running the linker...
    run_linker = kwargs.pop('run_linker', False)
    if run_linker:
        raise CompilationError("src2obj called with run_linker=True")

    if only_update:
        if not missing_or_other_newer(objpath, srcpath, cwd=cwd):
            msg = "Found {0}, did not recompile.".format(objpath)
            if kwargs.get('logger', None):
                kwargs['logger'].info(msg)
            else:
                print(msg)
            return objpath
    runner = CompilerRunner_(
        [srcpath], objpath, include_dirs=include_dirs,
        run_linker=run_linker, cwd=cwd, **kwargs)
    runner.run()
    return objpath
def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None,
            logger=None, full_module_name=None, only_update=False,
            metadir=None, include_numpy=False, include_dirs=None,
            cy_kwargs=None, gdb=False, cplus=None, **kwargs):
    """
    Convenience function

    If cwd is specified, pyxpath and dst are taken to be relative
    If only_update is set to `True` the modification time is checked
    and compilation is only run if the source is newer than the
    destination

    Parameters
    ----------
    pyxpath: path string
        path to Cython source file
    objpath: path string (optional)
        path to object file to generate
    interm_c_dir: path string (optional)
        directory to put generated C file.
    cwd: path string (optional)
        working directory and root of relative paths
    logger: logging.Logger (optional)
        passed onto `simple_cythonize` and `src2obj`
    full_module_name: string (optional)
        passed onto `simple_cythonize`
    only_update: bool (optional)
        passed onto `simple_cythonize` and `src2obj`
    metadir: path string (optional)
        passed onto src2obj
    include_numpy: bool (optional)
        Add numpy include directory to include_dirs. default: False
    include_dirs: iterable of path strings (optional)
        Passed onto src2obj and via cy_kwargs['include_path']
        to simple_cythonize.
    cy_kwargs: dict (optional)
        keyword arguments passed onto `simple_cythonize`
    gdb: bool (optional)
        convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
        default: False
    cplus: bool (optional)
        Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
    **kwargs: dict
        keyword arguments passed onto src2obj

    Returns
    -------
    Absolute path of generated object file.
    """
    assert pyxpath.endswith('.pyx')
    cwd = cwd or '.'
    objpath = objpath or '.'
    interm_c_dir = interm_c_dir or os.path.dirname(objpath)

    abs_objpath = get_abspath(objpath, cwd=cwd)

    if os.path.isdir(abs_objpath):
        # Directory given: name the object after the .pyx file.
        pyx_fname = os.path.basename(pyxpath)
        name, ext = os.path.splitext(pyx_fname)
        objpath = os.path.join(objpath, name+objext)

    cy_kwargs = cy_kwargs or {}
    cy_kwargs['output_dir'] = cwd
    if cplus is None:
        # Auto-detect C++ from the source (e.g. "# distutils: language = c++").
        cplus = pyx_is_cplus(pyxpath)
    cy_kwargs['cplus'] = cplus
    if gdb:
        cy_kwargs['gdb_debug'] = True
    if include_dirs:
        cy_kwargs['include_path'] = include_dirs

    # Step 1: .pyx -> .c/.cpp
    interm_c_file = simple_cythonize(
        pyxpath, destdir=interm_c_dir,
        cwd=cwd, logger=logger,
        full_module_name=full_module_name,
        only_update=only_update, **cy_kwargs)

    include_dirs = include_dirs or []
    if include_numpy:
        import numpy
        numpy_inc_dir = numpy.get_include()
        if numpy_inc_dir not in include_dirs:
            include_dirs.append(numpy_inc_dir)

    flags = kwargs.pop('flags', [])
    needed_flags = ('-fwrapv', '-pthread')
    if not cplus:
        needed_flags += ('-Wstrict-prototypes',)  # not really needed..
    for flag in needed_flags:
        if flag not in flags:
            flags.append(flag)

    options = kwargs.pop('options', [])

    if kwargs.pop('strict_aliasing', False):
        raise CompilationError("Cython req. strict aliasing to be disabled.")

    if 'pic' not in options:
        options.append('pic')
    if 'warn' not in options:
        options.append('warn')

    # Let's be explicit about standard
    if cplus:
        std = kwargs.pop('std', 'c++98')
    else:
        std = kwargs.pop('std', 'c99')

    # Step 2: compile the generated C/C++ file to an object file.
    return src2obj(
        interm_c_file,
        objpath=objpath,
        cwd=cwd,
        only_update=only_update,
        metadir=metadir,
        include_dirs=include_dirs,
        flags=flags,
        std=std,
        options=options,
        logger=logger,
        inc_py=True,
        strict_aliasing=False,
        **kwargs)
def _any_X(srcs, cls):
    """Return True if any path in *srcs* has an extension mapped to *cls*."""
    return any(
        extension_mapping.get(os.path.splitext(path)[1].lower(), (None,))[0] == cls
        for path in srcs
    )
def any_fort(srcs):
    """Return True if any path in *srcs* has a Fortran source extension."""
    return _any_X(srcs, FortranCompilerRunner)
def any_cplus(srcs):
    """Return True if any path in *srcs* has a C++ source extension."""
    return _any_X(srcs, CppCompilerRunner)
def compile_link_import_py_ext(
srcs, extname=None, build_dir=None, compile_kwargs=None,
link_kwargs=None, **kwargs):
"""
Compiles sources in `srcs` to a shared object (python extension)
which is imported. If shared object is newer than the sources, they
are not recompiled but instead it is imported.
Parameters
----------
srcs: string
list of paths to sources
extname: string
name of extension (default: None)
(taken from the last file in `srcs` - without extension)
build_dir: string
path to directory in which objects files etc. are generated
compile_kwargs: dict
keyword arguments passed to compile_sources
link_kwargs: dict
keyword arguments passed to link_py_so
**kwargs:
additional keyword arguments overwrites to both compile_kwargs
and link_kwargs useful for convenience e.g. when passing logger
Returns
-------
the imported module
Examples
--------
>>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
'fft_wrapper.pyx'], only_update=True) # doctest: +SKIP
>>> Aprim = mod.fft(A) # doctest: +SKIP
"""
build_dir = build_dir or '.'
if extname is None:
extname = os.path.splitext(os.path.basename(srcs[-1]))[0]
compile_kwargs = compile_kwargs or {}
compile_kwargs.update(kwargs)
link_kwargs = link_kwargs or {}
link_kwargs.update(kwargs)
try:
mod = import_module_from_file(os.path.join(build_dir, extname), srcs)
except ImportError:
objs = compile_sources(list(map(get_abspath, srcs)), destdir=build_dir,
cwd=build_dir, **compile_kwargs)
so = link_py_so(
objs, cwd=build_dir, fort=any_fort(srcs), cplus=any_cplus(srcs),
**link_kwargs)
mod = import_module_from_file(so)
return mod
|
bjodah/pycompilation | pycompilation/runners.py | CompilerRunner.find_compiler | python | def find_compiler(cls, preferred_vendor, metadir, cwd,
use_meta=True):
cwd = cwd or '.'
metadir = metadir or '.'
metadir = os.path.join(cwd, metadir)
used_metafile = False
if not preferred_vendor and use_meta:
try:
preferred_vendor = cls.get_from_metadata_file(
metadir, 'vendor')
used_metafile = True
except FileNotFoundError:
pass
candidates = list(cls.compiler_dict.keys())
if preferred_vendor:
if preferred_vendor in candidates:
candidates = [preferred_vendor]+candidates
else:
raise ValueError("Unknown vendor {}".format(
preferred_vendor))
name, path = find_binary_of_command([
cls.compiler_dict[x] for x in candidates])
if use_meta and not used_metafile:
if not os.path.isdir(metadir):
raise FileNotFoundError("Not a dir: {}".format(metadir))
cls.save_to_metadata_file(metadir, 'compiler',
(name, path))
cls.save_to_metadata_file(
metadir, 'vendor',
cls.compiler_name_vendor_mapping[name])
if cls.logger:
cls.logger.info(
'Wrote choice of compiler to: metadir')
return name, path, cls.compiler_name_vendor_mapping[name] | Identify a suitable C/fortran/other compiler
When it is possible that the user (un)installs a compiler
inbetween compilations of object files we want to catch
that. This method allows compiler choice to be stored in a
pickled metadata file. Provide metadir a dirpath to
make the class save choice there in a file with
cls.metadata_filename as name. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/runners.py#L245-L288 | [
"def find_binary_of_command(candidates):\n \"\"\"\n Calls `find_executable` from distuils for\n provided candidates and returns first hit.\n If no candidate mathces, a RuntimeError is raised\n \"\"\"\n from distutils.spawn import find_executable\n for c in candidates:\n binary_path = fin... | class CompilerRunner(object):
"""
CompilerRunner class.
Parameters
==========
sources: iterable of path strings
out: path string
flags: iterable of strings
run_linker: bool
compiler: string
compiler command to call
cwd: path string
root of relative paths
include_dirs: iterable of path strings
include directories
libraries: iterable of strings
libraries to link against.
library_dirs: iterable of path strings
paths to search for shared libraries
std: string
Standard string, e.g. c++11, c99, f2008
options: iterable of strings
pycompilation convenience tags (fast, warn, pic, openmp).
Sets extra compiler flags.
define: iterable of strings
macros to define
undef: iterable of strings
macros to undefine
logger: logging.Logger
info and error level used.
preferred_vendor: string
name of preferred vendor e.g. 'gnu' or 'intel'
metadir: path string
location where to cache metadata about compilation (choice of compiler)
lib_options: iterable of strings
pycompilation convenience tags e.g. 'openmp' and/or 'fortran'.
Sets extra libraries.
only_update: bool
Only run compiler if sources are newer than destination. default: False
Returns
=======
CompilerRunner instance
Methods
=======
run():
Invoke compilation as a subprocess. Log output if logger present.
"""
compiler_dict = None # Subclass to vendor/binary dict
# Standards should be a tuple of supported standards
# (first one will be the default)
standards = None
std_formater = None # Subclass to dict of binary/formater-callback
option_flag_dict = None # Lazy unified defaults for compilers
metadata_filename = '.metadata_CompilerRunner'
# subclass to be e.g. {'gcc': 'gnu', ...}
compiler_name_vendor_mapping = None
logger = None
default_compile_options = ('pic', 'warn', 'fast')
# http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor
# MKL 11.1 x86-64, *nix, MKLROOT env. set, dynamic linking
# This is _really_ ugly and not portable in any manner.
vendor_options_dict = {
'intel': {
'lapack': {
'linkline': [],
'libraries': ['mkl_avx', 'mkl_intel_lp64', 'mkl_core',
'mkl_intel_thread', 'pthread', 'm'],
'library_dirs': ['${MKLROOT}/lib/intel64'],
'include_dirs': ['${MKLROOT}/include/intel64/lp64',
'${MKLROOT}/include'],
'flags': ['-openmp'],
} if os.environ.get("INTEL_MKL_DYNAMIC", False) else {
'linkline': ['-Wl,--start-group ' +
' ${MKLROOT}/lib/intel64/libmkl_intel_ilp64.a' +
' ${MKLROOT}/lib/intel64/libmkl_core.a' +
' ${MKLROOT}/lib/intel64/libmkl_intel_thread.a' +
' -Wl,--end-group'],
'libraries': ['pthread', 'm'],
'library_dirs': ['${MKLROOT}/lib/intel64'],
'include_dirs': ['${MKLROOT}/include'],
'flags': ['-openmp'],
'def_macros': ['MKL_ILP64'],
}
},
'gnu': {
'lapack': {
'libraries': ['lapack', 'blas']
}
},
'llvm': {
'lapack': {
'libraries': ['lapack', 'blas']
}
},
}
def __init__(self, sources, out, flags=None, run_linker=True,
compiler=None, cwd=None, include_dirs=None, libraries=None,
library_dirs=None, std=None, options=None, define=None,
undef=None, strict_aliasing=None, logger=None,
preferred_vendor=None, metadir=None, lib_options=None,
only_update=False, **kwargs):
cwd = cwd or '.'
metadir = get_abspath(metadir or '.', cwd=cwd)
if hasattr(sources, '__iter__'):
self.sources = list(sources)
else:
self.sources = [sources]
self.out = out
self.flags = flags or []
if os.environ.get(self.environ_key_flags):
self.flags += os.environ[self.environ_key_flags].split()
self.metadir = metadir
self.cwd = cwd
if compiler or os.environ.get(self.environ_key_compiler):
if compiler:
self.compiler_name, self.compiler_binary = compiler
else:
self.compiler_binary = os.environ[self.environ_key_compiler]
for vk, cn in self.compiler_dict.items():
if cn in self.compiler_binary:
self.compiler_vendor = vk
self.compiler_name = cn
break
else:
self.compiler_vendor, self.compiler_name = list(self.compiler_dict.items())[0]
warnings.warn("unsure of what kind of compiler %s is, assuming %s" %
(self.compiler_binary, self.compiler_name))
self.save_to_metadata_file(
self.metadir, 'vendor',
self.compiler_name_vendor_mapping[
self.compiler_name])
else:
# Find a compiler
if preferred_vendor is None:
preferred_vendor = os.environ.get('COMPILER_VENDOR', None)
self.compiler_name, self.compiler_binary, \
self.compiler_vendor = self.find_compiler(
preferred_vendor, metadir, self.cwd)
if self.compiler_binary is None:
raise RuntimeError(
"No compiler found (searched: {0})".format(
', '.join(self.compiler_dict.values())))
self.define = define or []
self.undef = undef or []
self.include_dirs = include_dirs or []
self.libraries = libraries or []
self.library_dirs = library_dirs or []
self.options = options or self.default_compile_options
self.std = std or self.standards[0]
self.lib_options = lib_options or []
self.logger = logger
self.only_update = only_update
self.run_linker = run_linker
if self.run_linker:
# both gnu and intel compilers use '-c' for disabling linker
self.flags = list(filter(lambda x: x != '-c', self.flags))
else:
if '-c' not in self.flags:
self.flags.append('-c')
if self.std:
self.flags.append(self.std_formater[
self.compiler_name](self.std))
self.linkline = []
# Handle options
for opt in self.options:
self.flags.extend(self.option_flag_dict.get(
self.compiler_name, {}).get(opt, []))
# extend based on vendor options dict
def extend(l, k):
l.extend(
self.vendor_options_dict.get(
self.compiler_vendor, {}).get(
opt, {}).get(k, []))
for kw in ('flags', 'define', 'undef', 'include_dirs',
'library_dirs', 'libraries', 'linkline'):
extend(getattr(self, kw), kw)
# libraries
for lib_opt in self.lib_options:
self.libraries.extend(
self.lib_dict[self.compiler_name][lib_opt])
if strict_aliasing is not None:
nsa_re = re.compile("no-strict-aliasing$")
sa_re = re.compile("strict-aliasing$")
if strict_aliasing is True:
if any(map(nsa_re.match, flags)):
raise CompilationError("Strict aliasing cannot be" +
" both enforced and disabled")
elif any(map(sa_re.match, flags)):
pass # already enforced
else:
flags.append('-fstrict-aliasing')
elif strict_aliasing is False:
if any(map(nsa_re.match, flags)):
pass # already disabled
else:
if any(map(sa_re.match, flags)):
raise CompilationError("Strict aliasing cannot be" +
" both enforced and disabled")
else:
flags.append('-fno-strict-aliasing')
else:
raise ValueError("Unknown strict_aliasing={}".format(
strict_aliasing))
@classmethod
def cmd(self):
"""
The command below covers most cases, if you need
someting more complex subclass this.
"""
cmd = (
[self.compiler_binary] +
self.flags +
['-U'+x for x in self.undef] +
['-D'+x for x in self.define] +
['-I'+x for x in self.include_dirs] +
self.sources
)
if self.run_linker:
cmd += (['-L'+x for x in self.library_dirs] +
[(x if os.path.exists(x) else '-l'+x) for x in self.libraries] +
self.linkline)
counted = []
for envvar in re.findall('\$\{(\w+)\}', ' '.join(cmd)):
if os.getenv(envvar) is None:
if envvar not in counted:
counted.append(envvar)
msg = "Environment variable '{}' undefined.".format(
envvar)
self.logger.error(msg)
raise CompilationError(msg)
return cmd
def run(self):
if self.only_update:
for src in self.sources:
if missing_or_other_newer(self.out, src, cwd=self.cwd):
break
else:
msg = ('No source newer than {0}.' +
' Did not compile').format(
self.out)
if self.logger:
self.logger.info(msg)
else:
print(msg)
return self.out
self.flags = uniquify(self.flags)
# Append output flag and name to tail of flags
self.flags.extend(['-o', self.out])
# Logging
if self.logger:
self.logger.info(
'In "{0}", executing:\n"{1}"'.format(
self.cwd, ' '.join(self.cmd())))
env = os.environ.copy()
env['PWD'] = self.cwd
# NOTE: the ' '.join(self.cmd()) part seems to be necessary for
# intel compilers
p = subprocess.Popen(' '.join(self.cmd()),
shell=True,
cwd=self.cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
comm = p.communicate()
if sys.version_info[0] == 2:
self.cmd_outerr = comm[0]
else:
try:
self.cmd_outerr = comm[0].decode('utf-8')
except UnicodeDecodeError:
self.cmd_outerr = comm[0].decode('iso-8859-1') # win32
self.cmd_returncode = p.returncode
# Error handling
if self.cmd_returncode != 0:
msg = "Error executing '{0}' in {1}. Command exited with" + \
" status {2} after givning the following output: {3}\n"
raise CompilationError(msg.format(
' '.join(self.cmd()), self.cwd, str(self.cmd_returncode),
self.cmd_outerr))
if self.logger and len(self.cmd_outerr) > 0:
self.logger.info('...with output:\n'+self.cmd_outerr)
return self.cmd_outerr, self.cmd_returncode
|
bjodah/pycompilation | pycompilation/runners.py | CompilerRunner.cmd | python | def cmd(self):
cmd = (
[self.compiler_binary] +
self.flags +
['-U'+x for x in self.undef] +
['-D'+x for x in self.define] +
['-I'+x for x in self.include_dirs] +
self.sources
)
if self.run_linker:
cmd += (['-L'+x for x in self.library_dirs] +
[(x if os.path.exists(x) else '-l'+x) for x in self.libraries] +
self.linkline)
counted = []
for envvar in re.findall('\$\{(\w+)\}', ' '.join(cmd)):
if os.getenv(envvar) is None:
if envvar not in counted:
counted.append(envvar)
msg = "Environment variable '{}' undefined.".format(
envvar)
self.logger.error(msg)
raise CompilationError(msg)
return cmd | The command below covers most cases, if you need
someting more complex subclass this. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/runners.py#L290-L316 | null | class CompilerRunner(object):
"""
CompilerRunner class.
Parameters
==========
sources: iterable of path strings
out: path string
flags: iterable of strings
run_linker: bool
compiler: string
compiler command to call
cwd: path string
root of relative paths
include_dirs: iterable of path strings
include directories
libraries: iterable of strings
libraries to link against.
library_dirs: iterable of path strings
paths to search for shared libraries
std: string
Standard string, e.g. c++11, c99, f2008
options: iterable of strings
pycompilation convenience tags (fast, warn, pic, openmp).
Sets extra compiler flags.
define: iterable of strings
macros to define
undef: iterable of strings
macros to undefine
logger: logging.Logger
info and error level used.
preferred_vendor: string
name of preferred vendor e.g. 'gnu' or 'intel'
metadir: path string
location where to cache metadata about compilation (choice of compiler)
lib_options: iterable of strings
pycompilation convenience tags e.g. 'openmp' and/or 'fortran'.
Sets extra libraries.
only_update: bool
Only run compiler if sources are newer than destination. default: False
Returns
=======
CompilerRunner instance
Methods
=======
run():
Invoke compilation as a subprocess. Log output if logger present.
"""
compiler_dict = None # Subclass to vendor/binary dict
# Standards should be a tuple of supported standards
# (first one will be the default)
standards = None
std_formater = None # Subclass to dict of binary/formater-callback
option_flag_dict = None # Lazy unified defaults for compilers
metadata_filename = '.metadata_CompilerRunner'
# subclass to be e.g. {'gcc': 'gnu', ...}
compiler_name_vendor_mapping = None
logger = None
default_compile_options = ('pic', 'warn', 'fast')
# http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor
# MKL 11.1 x86-64, *nix, MKLROOT env. set, dynamic linking
# This is _really_ ugly and not portable in any manner.
vendor_options_dict = {
'intel': {
'lapack': {
'linkline': [],
'libraries': ['mkl_avx', 'mkl_intel_lp64', 'mkl_core',
'mkl_intel_thread', 'pthread', 'm'],
'library_dirs': ['${MKLROOT}/lib/intel64'],
'include_dirs': ['${MKLROOT}/include/intel64/lp64',
'${MKLROOT}/include'],
'flags': ['-openmp'],
} if os.environ.get("INTEL_MKL_DYNAMIC", False) else {
'linkline': ['-Wl,--start-group ' +
' ${MKLROOT}/lib/intel64/libmkl_intel_ilp64.a' +
' ${MKLROOT}/lib/intel64/libmkl_core.a' +
' ${MKLROOT}/lib/intel64/libmkl_intel_thread.a' +
' -Wl,--end-group'],
'libraries': ['pthread', 'm'],
'library_dirs': ['${MKLROOT}/lib/intel64'],
'include_dirs': ['${MKLROOT}/include'],
'flags': ['-openmp'],
'def_macros': ['MKL_ILP64'],
}
},
'gnu': {
'lapack': {
'libraries': ['lapack', 'blas']
}
},
'llvm': {
'lapack': {
'libraries': ['lapack', 'blas']
}
},
}
def __init__(self, sources, out, flags=None, run_linker=True,
compiler=None, cwd=None, include_dirs=None, libraries=None,
library_dirs=None, std=None, options=None, define=None,
undef=None, strict_aliasing=None, logger=None,
preferred_vendor=None, metadir=None, lib_options=None,
only_update=False, **kwargs):
cwd = cwd or '.'
metadir = get_abspath(metadir or '.', cwd=cwd)
if hasattr(sources, '__iter__'):
self.sources = list(sources)
else:
self.sources = [sources]
self.out = out
self.flags = flags or []
if os.environ.get(self.environ_key_flags):
self.flags += os.environ[self.environ_key_flags].split()
self.metadir = metadir
self.cwd = cwd
if compiler or os.environ.get(self.environ_key_compiler):
if compiler:
self.compiler_name, self.compiler_binary = compiler
else:
self.compiler_binary = os.environ[self.environ_key_compiler]
for vk, cn in self.compiler_dict.items():
if cn in self.compiler_binary:
self.compiler_vendor = vk
self.compiler_name = cn
break
else:
self.compiler_vendor, self.compiler_name = list(self.compiler_dict.items())[0]
warnings.warn("unsure of what kind of compiler %s is, assuming %s" %
(self.compiler_binary, self.compiler_name))
self.save_to_metadata_file(
self.metadir, 'vendor',
self.compiler_name_vendor_mapping[
self.compiler_name])
else:
# Find a compiler
if preferred_vendor is None:
preferred_vendor = os.environ.get('COMPILER_VENDOR', None)
self.compiler_name, self.compiler_binary, \
self.compiler_vendor = self.find_compiler(
preferred_vendor, metadir, self.cwd)
if self.compiler_binary is None:
raise RuntimeError(
"No compiler found (searched: {0})".format(
', '.join(self.compiler_dict.values())))
self.define = define or []
self.undef = undef or []
self.include_dirs = include_dirs or []
self.libraries = libraries or []
self.library_dirs = library_dirs or []
self.options = options or self.default_compile_options
self.std = std or self.standards[0]
self.lib_options = lib_options or []
self.logger = logger
self.only_update = only_update
self.run_linker = run_linker
if self.run_linker:
# both gnu and intel compilers use '-c' for disabling linker
self.flags = list(filter(lambda x: x != '-c', self.flags))
else:
if '-c' not in self.flags:
self.flags.append('-c')
if self.std:
self.flags.append(self.std_formater[
self.compiler_name](self.std))
self.linkline = []
# Handle options
for opt in self.options:
self.flags.extend(self.option_flag_dict.get(
self.compiler_name, {}).get(opt, []))
# extend based on vendor options dict
def extend(l, k):
l.extend(
self.vendor_options_dict.get(
self.compiler_vendor, {}).get(
opt, {}).get(k, []))
for kw in ('flags', 'define', 'undef', 'include_dirs',
'library_dirs', 'libraries', 'linkline'):
extend(getattr(self, kw), kw)
# libraries
for lib_opt in self.lib_options:
self.libraries.extend(
self.lib_dict[self.compiler_name][lib_opt])
if strict_aliasing is not None:
nsa_re = re.compile("no-strict-aliasing$")
sa_re = re.compile("strict-aliasing$")
if strict_aliasing is True:
if any(map(nsa_re.match, flags)):
raise CompilationError("Strict aliasing cannot be" +
" both enforced and disabled")
elif any(map(sa_re.match, flags)):
pass # already enforced
else:
flags.append('-fstrict-aliasing')
elif strict_aliasing is False:
if any(map(nsa_re.match, flags)):
pass # already disabled
else:
if any(map(sa_re.match, flags)):
raise CompilationError("Strict aliasing cannot be" +
" both enforced and disabled")
else:
flags.append('-fno-strict-aliasing')
else:
raise ValueError("Unknown strict_aliasing={}".format(
strict_aliasing))
@classmethod
def find_compiler(cls, preferred_vendor, metadir, cwd,
use_meta=True):
"""
Identify a suitable C/fortran/other compiler
When it is possible that the user (un)installs a compiler
inbetween compilations of object files we want to catch
that. This method allows compiler choice to be stored in a
pickled metadata file. Provide metadir a dirpath to
make the class save choice there in a file with
cls.metadata_filename as name.
"""
cwd = cwd or '.'
metadir = metadir or '.'
metadir = os.path.join(cwd, metadir)
used_metafile = False
if not preferred_vendor and use_meta:
try:
preferred_vendor = cls.get_from_metadata_file(
metadir, 'vendor')
used_metafile = True
except FileNotFoundError:
pass
candidates = list(cls.compiler_dict.keys())
if preferred_vendor:
if preferred_vendor in candidates:
candidates = [preferred_vendor]+candidates
else:
raise ValueError("Unknown vendor {}".format(
preferred_vendor))
name, path = find_binary_of_command([
cls.compiler_dict[x] for x in candidates])
if use_meta and not used_metafile:
if not os.path.isdir(metadir):
raise FileNotFoundError("Not a dir: {}".format(metadir))
cls.save_to_metadata_file(metadir, 'compiler',
(name, path))
cls.save_to_metadata_file(
metadir, 'vendor',
cls.compiler_name_vendor_mapping[name])
if cls.logger:
cls.logger.info(
'Wrote choice of compiler to: metadir')
return name, path, cls.compiler_name_vendor_mapping[name]
def run(self):
if self.only_update:
for src in self.sources:
if missing_or_other_newer(self.out, src, cwd=self.cwd):
break
else:
msg = ('No source newer than {0}.' +
' Did not compile').format(
self.out)
if self.logger:
self.logger.info(msg)
else:
print(msg)
return self.out
self.flags = uniquify(self.flags)
# Append output flag and name to tail of flags
self.flags.extend(['-o', self.out])
# Logging
if self.logger:
self.logger.info(
'In "{0}", executing:\n"{1}"'.format(
self.cwd, ' '.join(self.cmd())))
env = os.environ.copy()
env['PWD'] = self.cwd
# NOTE: the ' '.join(self.cmd()) part seems to be necessary for
# intel compilers
p = subprocess.Popen(' '.join(self.cmd()),
shell=True,
cwd=self.cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
comm = p.communicate()
if sys.version_info[0] == 2:
self.cmd_outerr = comm[0]
else:
try:
self.cmd_outerr = comm[0].decode('utf-8')
except UnicodeDecodeError:
self.cmd_outerr = comm[0].decode('iso-8859-1') # win32
self.cmd_returncode = p.returncode
# Error handling
if self.cmd_returncode != 0:
msg = "Error executing '{0}' in {1}. Command exited with" + \
" status {2} after givning the following output: {3}\n"
raise CompilationError(msg.format(
' '.join(self.cmd()), self.cwd, str(self.cmd_returncode),
self.cmd_outerr))
if self.logger and len(self.cmd_outerr) > 0:
self.logger.info('...with output:\n'+self.cmd_outerr)
return self.cmd_outerr, self.cmd_returncode
|
bjodah/pycompilation | pycompilation/dist.py | PCExtension | python | def PCExtension(*args, **kwargs):
vals = {}
intercept = {
'build_callbacks': (), # tuple of (callback, args, kwargs)
'link_ext': True,
'build_files': (),
'dist_files': (), # work around stackoverflow.com/questions/2994396/
'template_regexps': [],
'pass_extra_compile_args': False, # use distutils extra_compile_args?
'pycompilation_compile_kwargs': {},
'pycompilation_link_kwargs': {},
}
for k, v in intercept.items():
vals[k] = kwargs.pop(k, v)
intercept2 = {
'logger': None,
'only_update': True,
}
for k, v in intercept2.items():
vck = kwargs.pop(k, v)
vck = vals['pycompilation_compile_kwargs'].pop(k, vck)
vck = vck or vals['pycompilation_link_kwargs'].pop(k, vck)
vals[k] = vck
instance = Extension(*args, **kwargs)
if vals['logger'] is True:
# interpret as we should instantiate a logger
import logging
logging.basicConfig(level=logging.DEBUG)
vals['logger'] = logging.getLogger('PCExtension')
for k, v in vals.items():
setattr(instance, k, v)
return instance | Parameters
==========
template_regexps: list of 3-tuples
e.g. [(pattern1, target1, subsd1), ...], used to generate
templated code
pass_extra_compile_args: bool
should ext.extra_compile_args be passed along? default: False | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/dist.py#L25-L71 | null | # -*- coding: utf-8 -*-
"""
Interaction with distutils
"""
from __future__ import print_function, division, absolute_import
import os
import re
from distutils.command import build_ext, sdist
from distutils.extension import Extension
from .compilation import (
compile_sources, link_py_so, any_fort,
any_cplus, simple_cythonize
)
from .util import (
copy, get_abspath, missing_or_other_newer,
MetaReaderWriter, FileNotFoundError, pyx_is_cplus, make_dirs
)
def _copy_or_render_source(ext, f, output_dir, render_callback,
skip_copy=False):
"""
Tries to do regex match for each (pattern, target, subsd) tuple
in ext.template_regexps for file f.
"""
# Either render a template or copy the source
dirname = os.path.dirname(f)
filename = os.path.basename(f)
for pattern, target, subsd in ext.template_regexps:
if re.match(pattern, filename):
tgt = os.path.join(dirname, re.sub(
pattern, target, filename))
rw = MetaReaderWriter('.metadata_subsd')
try:
prev_subsd = rw.get_from_metadata_file(output_dir, f)
except (FileNotFoundError, KeyError):
prev_subsd = None
render_callback(
get_abspath(f),
os.path.join(output_dir, tgt),
subsd,
only_update=ext.only_update,
prev_subsd=prev_subsd,
create_dest_dirs=True,
logger=ext.logger)
rw.save_to_metadata_file(output_dir, f, subsd)
return tgt
else:
if not skip_copy:
copy(f,
os.path.join(output_dir,
os.path.dirname(f)),
only_update=ext.only_update,
dest_is_dir=True,
create_dest_dirs=True,
logger=ext.logger)
return f
def render_python_template_to(src, dest, subsd, only_update=False,
prev_subsd=None, create_dest_dirs=True,
logger=None):
"""
Overload this function if you want to use a template engine such as
e.g. mako.
"""
if only_update:
if subsd == prev_subsd:
if not missing_or_other_newer(dest, src):
if logger:
msg = ("Did not re-render {}. "
"(destination newer + same dict)")
logger.info(msg.format(src))
return
with open(src, 'rt') as ifh:
data = ifh.read() # Don't go crazy on file size...
if create_dest_dirs:
dest_dir = os.path.dirname(dest)
if not os.path.exists(dest_dir):
make_dirs(dest_dir)
with open(dest, 'wt') as ofh:
ofh.write(data % subsd)
class pc_build_ext(build_ext.build_ext):
"""
build_ext class for PCExtension
Support for template_regexps
"""
render_callback = staticmethod(render_python_template_to)
def run(self):
if self.dry_run:
return # honor the --dry-run flag
for ext in self.extensions:
sources = []
if ext.logger:
ext.logger.info("Copying/rendering sources...")
for f in ext.sources:
sources.append(_copy_or_render_source(
ext, f, self.build_temp, self.render_callback))
if ext.logger:
ext.logger.info("Copying build_files...")
for f in ext.build_files:
copy(f, os.path.join(self.build_temp,
os.path.dirname(f)),
only_update=ext.only_update,
dest_is_dir=True,
create_dest_dirs=True,
logger=ext.logger)
if ext.pass_extra_compile_args:
# By default we do not pass extra_compile_kwargs
# since it contains '-fno-strict-aliasing' which
# harms performance.
ext.pycompilation_compile_kwargs['flags'] =\
ext.extra_compile_args,
if ext.define_macros:
ext.pycompilation_compile_kwargs['define'] =\
list(set(ext.define_macros +
ext.pycompilation_compile_kwargs['define']))
if ext.undef_macros:
ext.pycompilation_compile_kwargs['undef'] =\
list(set(ext.undef_macros +
ext.pycompilation_compile_kwargs['undef']))
# Run build_callbaks if any were provided
for cb, args, kwargs in ext.build_callbacks:
cb(self.build_temp, self.get_ext_fullpath(
ext.name), ext, *args, **kwargs)
# Compile sources to object files
src_objs = compile_sources(
sources,
cwd=self.build_temp,
include_dirs=list(map(get_abspath, ext.include_dirs)),
logger=ext.logger,
only_update=ext.only_update,
**ext.pycompilation_compile_kwargs
)
if ext.logger:
ext.logger.info(
"Copying files needed for distribution..")
for f, rel_dst in ext.dist_files:
rel_dst = rel_dst or os.path.basename(f)
copy(
f,
os.path.join(
os.path.dirname(self.get_ext_fullpath(ext.name)),
rel_dst,
),
only_update=ext.only_update,
logger=ext.logger,
)
# Link objects to a shared object
if ext.link_ext:
abs_so_path = link_py_so(
src_objs+ext.extra_objects,
cwd=self.build_temp,
flags=ext.extra_link_args,
library_dirs=list(map(get_abspath, ext.library_dirs)),
libraries=ext.libraries,
fort=any_fort(sources),
cplus=(((ext.language or '').lower() == 'c++') or
any_cplus(sources)),
logger=ext.logger,
only_update=ext.only_update,
**ext.pycompilation_link_kwargs
)
copy(
abs_so_path, self.get_ext_fullpath(ext.name),
only_update=ext.only_update,
create_dest_dirs=True, logger=ext.logger
)
class pc_sdist(sdist.sdist):
render_callback = staticmethod(render_python_template_to)
def run(self):
for ext in self.distribution.ext_modules:
_sources = []
for src in ext.sources:
if src.endswith('.pyx'):
cy_kwargs = {
'cplus': pyx_is_cplus(src),
'include_path': ext.include_dirs
}
_sources.append(simple_cythonize(
src, os.path.dirname(src), **cy_kwargs))
else:
# Copy or render
_sources.append(_copy_or_render_source(
ext, src, '.',
self.render_callback, skip_copy=True))
ext.sources = _sources
sdist.sdist.run(self)
|
bjodah/pycompilation | pycompilation/dist.py | _copy_or_render_source | python | def _copy_or_render_source(ext, f, output_dir, render_callback,
skip_copy=False):
# Either render a template or copy the source
dirname = os.path.dirname(f)
filename = os.path.basename(f)
for pattern, target, subsd in ext.template_regexps:
if re.match(pattern, filename):
tgt = os.path.join(dirname, re.sub(
pattern, target, filename))
rw = MetaReaderWriter('.metadata_subsd')
try:
prev_subsd = rw.get_from_metadata_file(output_dir, f)
except (FileNotFoundError, KeyError):
prev_subsd = None
render_callback(
get_abspath(f),
os.path.join(output_dir, tgt),
subsd,
only_update=ext.only_update,
prev_subsd=prev_subsd,
create_dest_dirs=True,
logger=ext.logger)
rw.save_to_metadata_file(output_dir, f, subsd)
return tgt
else:
if not skip_copy:
copy(f,
os.path.join(output_dir,
os.path.dirname(f)),
only_update=ext.only_update,
dest_is_dir=True,
create_dest_dirs=True,
logger=ext.logger)
return f | Tries to do regex match for each (pattern, target, subsd) tuple
in ext.template_regexps for file f. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/dist.py#L74-L112 | [
"def MetaReaderWriter(filename):\n class ReaderWriter(HasMetaData):\n metadata_filename = filename\n return ReaderWriter()\n",
"def get_abspath(path, cwd=None):\n if os.path.isabs(path):\n return path\n else:\n cwd = cwd or '.'\n if not os.path.isabs(cwd):\n cwd ... | # -*- coding: utf-8 -*-
"""
Interaction with distutils
"""
from __future__ import print_function, division, absolute_import
import os
import re
from distutils.command import build_ext, sdist
from distutils.extension import Extension
from .compilation import (
compile_sources, link_py_so, any_fort,
any_cplus, simple_cythonize
)
from .util import (
copy, get_abspath, missing_or_other_newer,
MetaReaderWriter, FileNotFoundError, pyx_is_cplus, make_dirs
)
def PCExtension(*args, **kwargs):
"""
Parameters
==========
template_regexps: list of 3-tuples
e.g. [(pattern1, target1, subsd1), ...], used to generate
templated code
pass_extra_compile_args: bool
should ext.extra_compile_args be passed along? default: False
"""
vals = {}
intercept = {
'build_callbacks': (), # tuple of (callback, args, kwargs)
'link_ext': True,
'build_files': (),
'dist_files': (), # work around stackoverflow.com/questions/2994396/
'template_regexps': [],
'pass_extra_compile_args': False, # use distutils extra_compile_args?
'pycompilation_compile_kwargs': {},
'pycompilation_link_kwargs': {},
}
for k, v in intercept.items():
vals[k] = kwargs.pop(k, v)
intercept2 = {
'logger': None,
'only_update': True,
}
for k, v in intercept2.items():
vck = kwargs.pop(k, v)
vck = vals['pycompilation_compile_kwargs'].pop(k, vck)
vck = vck or vals['pycompilation_link_kwargs'].pop(k, vck)
vals[k] = vck
instance = Extension(*args, **kwargs)
if vals['logger'] is True:
# interpret as we should instantiate a logger
import logging
logging.basicConfig(level=logging.DEBUG)
vals['logger'] = logging.getLogger('PCExtension')
for k, v in vals.items():
setattr(instance, k, v)
return instance
def render_python_template_to(src, dest, subsd, only_update=False,
prev_subsd=None, create_dest_dirs=True,
logger=None):
"""
Overload this function if you want to use a template engine such as
e.g. mako.
"""
if only_update:
if subsd == prev_subsd:
if not missing_or_other_newer(dest, src):
if logger:
msg = ("Did not re-render {}. "
"(destination newer + same dict)")
logger.info(msg.format(src))
return
with open(src, 'rt') as ifh:
data = ifh.read() # Don't go crazy on file size...
if create_dest_dirs:
dest_dir = os.path.dirname(dest)
if not os.path.exists(dest_dir):
make_dirs(dest_dir)
with open(dest, 'wt') as ofh:
ofh.write(data % subsd)
class pc_build_ext(build_ext.build_ext):
    """
    build_ext class for PCExtension

    Support for template_regexps: sources are first copied or rendered
    into ``build_temp``, then compiled and linked via pycompilation's
    ``compile_sources`` / ``link_py_so`` instead of distutils' own
    compiler machinery.
    """
    # Hook so subclasses can swap in another template engine.
    render_callback = staticmethod(render_python_template_to)
    def run(self):
        """Build every extension: stage sources, compile, link, install."""
        if self.dry_run:
            return # honor the --dry-run flag
        for ext in self.extensions:
            sources = []
            # Stage each source into build_temp (rendering templates
            # that match ext.template_regexps along the way).
            if ext.logger:
                ext.logger.info("Copying/rendering sources...")
            for f in ext.sources:
                sources.append(_copy_or_render_source(
                    ext, f, self.build_temp, self.render_callback))
            if ext.logger:
                ext.logger.info("Copying build_files...")
            for f in ext.build_files:
                copy(f, os.path.join(self.build_temp,
                                     os.path.dirname(f)),
                     only_update=ext.only_update,
                     dest_is_dir=True,
                     create_dest_dirs=True,
                     logger=ext.logger)
            if ext.pass_extra_compile_args:
                # By default we do not pass extra_compile_kwargs
                # since it contains '-fno-strict-aliasing' which
                # harms performance.
                # NOTE(review): the trailing comma below makes 'flags' a
                # 1-tuple containing the list -- TODO confirm intended.
                ext.pycompilation_compile_kwargs['flags'] =\
                    ext.extra_compile_args,
            # Merge distutils-level macro definitions into the
            # pycompilation compile kwargs (deduplicated via set()).
            # NOTE(review): assumes 'define'/'undef' keys already exist in
            # pycompilation_compile_kwargs when macros are given -- a
            # missing key would raise KeyError. TODO confirm.
            if ext.define_macros:
                ext.pycompilation_compile_kwargs['define'] =\
                    list(set(ext.define_macros +
                             ext.pycompilation_compile_kwargs['define']))
            if ext.undef_macros:
                ext.pycompilation_compile_kwargs['undef'] =\
                    list(set(ext.undef_macros +
                             ext.pycompilation_compile_kwargs['undef']))
            # Run build_callbaks if any were provided
            for cb, args, kwargs in ext.build_callbacks:
                cb(self.build_temp, self.get_ext_fullpath(
                    ext.name), ext, *args, **kwargs)
            # Compile sources to object files
            src_objs = compile_sources(
                sources,
                cwd=self.build_temp,
                include_dirs=list(map(get_abspath, ext.include_dirs)),
                logger=ext.logger,
                only_update=ext.only_update,
                **ext.pycompilation_compile_kwargs
            )
            if ext.logger:
                ext.logger.info(
                    "Copying files needed for distribution..")
            for f, rel_dst in ext.dist_files:
                # rel_dst of None/'' defaults to the source's basename.
                rel_dst = rel_dst or os.path.basename(f)
                copy(
                    f,
                    os.path.join(
                        os.path.dirname(self.get_ext_fullpath(ext.name)),
                        rel_dst,
                    ),
                    only_update=ext.only_update,
                    logger=ext.logger,
                )
            # Link objects to a shared object
            if ext.link_ext:
                abs_so_path = link_py_so(
                    src_objs+ext.extra_objects,
                    cwd=self.build_temp,
                    flags=ext.extra_link_args,
                    library_dirs=list(map(get_abspath, ext.library_dirs)),
                    libraries=ext.libraries,
                    fort=any_fort(sources),
                    cplus=(((ext.language or '').lower() == 'c++') or
                           any_cplus(sources)),
                    logger=ext.logger,
                    only_update=ext.only_update,
                    **ext.pycompilation_link_kwargs
                )
                # Move the linked .so to the location distutils expects.
                copy(
                    abs_so_path, self.get_ext_fullpath(ext.name),
                    only_update=ext.only_update,
                    create_dest_dirs=True, logger=ext.logger
                )
class pc_sdist(sdist.sdist):
    """
    sdist command which pre-processes extension sources before packing:
    .pyx files are cythonized in place and templated sources are rendered,
    so the generated files ship inside the source distribution.
    """
    # Hook so subclasses can swap in another template engine.
    render_callback = staticmethod(render_python_template_to)
    def run(self):
        """Cythonize/render each extension's sources, then run stock sdist."""
        for ext in self.distribution.ext_modules:
            _sources = []
            for src in ext.sources:
                if src.endswith('.pyx'):
                    # Generate the C/C++ file next to the .pyx so it is
                    # included in the tarball.
                    cy_kwargs = {
                        'cplus': pyx_is_cplus(src),
                        'include_path': ext.include_dirs
                    }
                    _sources.append(simple_cythonize(
                        src, os.path.dirname(src), **cy_kwargs))
                else:
                    # Copy or render
                    _sources.append(_copy_or_render_source(
                        ext, src, '.',
                        self.render_callback, skip_copy=True))
            # Replace the source list with the processed files.
            ext.sources = _sources
        sdist.sdist.run(self)
|
bjodah/pycompilation | pycompilation/dist.py | render_python_template_to | python | def render_python_template_to(src, dest, subsd, only_update=False,
prev_subsd=None, create_dest_dirs=True,
logger=None):
if only_update:
if subsd == prev_subsd:
if not missing_or_other_newer(dest, src):
if logger:
msg = ("Did not re-render {}. "
"(destination newer + same dict)")
logger.info(msg.format(src))
return
with open(src, 'rt') as ifh:
data = ifh.read() # Don't go crazy on file size...
if create_dest_dirs:
dest_dir = os.path.dirname(dest)
if not os.path.exists(dest_dir):
make_dirs(dest_dir)
with open(dest, 'wt') as ofh:
ofh.write(data % subsd) | Overload this function if you want to use a template engine such as
e.g. mako. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/dist.py#L115-L140 | [
"def missing_or_other_newer(path, other_path, cwd=None):\n \"\"\"\n Investigate if path is non-existant or older than provided reference\n path.\n\n Parameters\n ==========\n path: string\n path to path which might be missing or too old\n other_path: string\n reference path\n c... | # -*- coding: utf-8 -*-
"""
Interaction with distutils
"""
from __future__ import print_function, division, absolute_import
import os
import re
from distutils.command import build_ext, sdist
from distutils.extension import Extension
from .compilation import (
compile_sources, link_py_so, any_fort,
any_cplus, simple_cythonize
)
from .util import (
copy, get_abspath, missing_or_other_newer,
MetaReaderWriter, FileNotFoundError, pyx_is_cplus, make_dirs
)
def PCExtension(*args, **kwargs):
"""
Parameters
==========
template_regexps: list of 3-tuples
e.g. [(pattern1, target1, subsd1), ...], used to generate
templated code
pass_extra_compile_args: bool
should ext.extra_compile_args be passed along? default: False
"""
vals = {}
intercept = {
'build_callbacks': (), # tuple of (callback, args, kwargs)
'link_ext': True,
'build_files': (),
'dist_files': (), # work around stackoverflow.com/questions/2994396/
'template_regexps': [],
'pass_extra_compile_args': False, # use distutils extra_compile_args?
'pycompilation_compile_kwargs': {},
'pycompilation_link_kwargs': {},
}
for k, v in intercept.items():
vals[k] = kwargs.pop(k, v)
intercept2 = {
'logger': None,
'only_update': True,
}
for k, v in intercept2.items():
vck = kwargs.pop(k, v)
vck = vals['pycompilation_compile_kwargs'].pop(k, vck)
vck = vck or vals['pycompilation_link_kwargs'].pop(k, vck)
vals[k] = vck
instance = Extension(*args, **kwargs)
if vals['logger'] is True:
# interpret as we should instantiate a logger
import logging
logging.basicConfig(level=logging.DEBUG)
vals['logger'] = logging.getLogger('PCExtension')
for k, v in vals.items():
setattr(instance, k, v)
return instance
def _copy_or_render_source(ext, f, output_dir, render_callback,
                           skip_copy=False):
    """
    Tries to do regex match for each (pattern, target, subsd) tuple
    in ext.template_regexps for file f.

    On the first matching pattern the template is rendered into
    ``output_dir`` and the (relative) target path is returned; the
    substitution dict is remembered in a metadata file so unchanged
    templates can be skipped on the next build.  If no pattern matches,
    the file is copied (unless *skip_copy*) and f is returned unchanged.
    """
    # Either render a template or copy the source
    dirname = os.path.dirname(f)
    filename = os.path.basename(f)
    for pattern, target, subsd in ext.template_regexps:
        if re.match(pattern, filename):
            # Derive the rendered file's name from the regex target.
            tgt = os.path.join(dirname, re.sub(
                pattern, target, filename))
            rw = MetaReaderWriter('.metadata_subsd')
            try:
                # Substitution dict used last time (for only_update).
                prev_subsd = rw.get_from_metadata_file(output_dir, f)
            except (FileNotFoundError, KeyError):
                # First build, or this template not rendered before.
                prev_subsd = None
            render_callback(
                get_abspath(f),
                os.path.join(output_dir, tgt),
                subsd,
                only_update=ext.only_update,
                prev_subsd=prev_subsd,
                create_dest_dirs=True,
                logger=ext.logger)
            rw.save_to_metadata_file(output_dir, f, subsd)
            return tgt
    else:
        # No template pattern matched: plain copy (or no-op for sdist).
        if not skip_copy:
            copy(f,
                 os.path.join(output_dir,
                              os.path.dirname(f)),
                 only_update=ext.only_update,
                 dest_is_dir=True,
                 create_dest_dirs=True,
                 logger=ext.logger)
        return f
class pc_build_ext(build_ext.build_ext):
"""
build_ext class for PCExtension
Support for template_regexps
"""
render_callback = staticmethod(render_python_template_to)
def run(self):
if self.dry_run:
return # honor the --dry-run flag
for ext in self.extensions:
sources = []
if ext.logger:
ext.logger.info("Copying/rendering sources...")
for f in ext.sources:
sources.append(_copy_or_render_source(
ext, f, self.build_temp, self.render_callback))
if ext.logger:
ext.logger.info("Copying build_files...")
for f in ext.build_files:
copy(f, os.path.join(self.build_temp,
os.path.dirname(f)),
only_update=ext.only_update,
dest_is_dir=True,
create_dest_dirs=True,
logger=ext.logger)
if ext.pass_extra_compile_args:
# By default we do not pass extra_compile_kwargs
# since it contains '-fno-strict-aliasing' which
# harms performance.
ext.pycompilation_compile_kwargs['flags'] =\
ext.extra_compile_args,
if ext.define_macros:
ext.pycompilation_compile_kwargs['define'] =\
list(set(ext.define_macros +
ext.pycompilation_compile_kwargs['define']))
if ext.undef_macros:
ext.pycompilation_compile_kwargs['undef'] =\
list(set(ext.undef_macros +
ext.pycompilation_compile_kwargs['undef']))
# Run build_callbaks if any were provided
for cb, args, kwargs in ext.build_callbacks:
cb(self.build_temp, self.get_ext_fullpath(
ext.name), ext, *args, **kwargs)
# Compile sources to object files
src_objs = compile_sources(
sources,
cwd=self.build_temp,
include_dirs=list(map(get_abspath, ext.include_dirs)),
logger=ext.logger,
only_update=ext.only_update,
**ext.pycompilation_compile_kwargs
)
if ext.logger:
ext.logger.info(
"Copying files needed for distribution..")
for f, rel_dst in ext.dist_files:
rel_dst = rel_dst or os.path.basename(f)
copy(
f,
os.path.join(
os.path.dirname(self.get_ext_fullpath(ext.name)),
rel_dst,
),
only_update=ext.only_update,
logger=ext.logger,
)
# Link objects to a shared object
if ext.link_ext:
abs_so_path = link_py_so(
src_objs+ext.extra_objects,
cwd=self.build_temp,
flags=ext.extra_link_args,
library_dirs=list(map(get_abspath, ext.library_dirs)),
libraries=ext.libraries,
fort=any_fort(sources),
cplus=(((ext.language or '').lower() == 'c++') or
any_cplus(sources)),
logger=ext.logger,
only_update=ext.only_update,
**ext.pycompilation_link_kwargs
)
copy(
abs_so_path, self.get_ext_fullpath(ext.name),
only_update=ext.only_update,
create_dest_dirs=True, logger=ext.logger
)
class pc_sdist(sdist.sdist):
render_callback = staticmethod(render_python_template_to)
def run(self):
for ext in self.distribution.ext_modules:
_sources = []
for src in ext.sources:
if src.endswith('.pyx'):
cy_kwargs = {
'cplus': pyx_is_cplus(src),
'include_path': ext.include_dirs
}
_sources.append(simple_cythonize(
src, os.path.dirname(src), **cy_kwargs))
else:
# Copy or render
_sources.append(_copy_or_render_source(
ext, src, '.',
self.render_callback, skip_copy=True))
ext.sources = _sources
sdist.sdist.run(self)
|
bjodah/pycompilation | pycompilation/util.py | expand_collection_in_dict | python | def expand_collection_in_dict(d, key, new_items, no_duplicates=True):
if key in d:
if no_duplicates:
new_items = filter(lambda x: x not in d[key], new_items)
if isinstance(d[key], set):
map(d[key].add, new_items)
elif isinstance(d[key], list):
map(d[key].append, new_items)
else:
d[key] = d[key] + new_items
else:
d[key] = new_items | Parameters
d: dict
dict in which a key will be inserted/expanded
key: hashable
key in d
new_items: iterable
d[key] will be extended with items in new_items
no_duplicates: bool
avoid inserting duplicates in d[key] (default: True) | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L22-L44 | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import fnmatch
import os
import pickle
import shutil
from collections import namedtuple
from hashlib import md5
class CompilationError(Exception):
    """Raised by pycompilation when compiling/linking sources fails."""
    pass
class FileNotFoundError(Exception):
    """Raised when an expected file/directory is missing (this module
    predates the Python 3 builtin of the same name)."""
    pass
Glob = namedtuple('Glob', 'pathname')
ArbitraryDepthGlob = namedtuple('ArbitraryDepthGlob', 'filename')
def glob_at_depth(filename_glob, cwd=None):
    """
    Recursively walk *cwd* (default: current directory) and return the
    paths of all files whose basename matches *filename_glob*.

    Parameters
    ==========
    filename_glob: string
        fnmatch-style pattern matched against file basenames
    cwd: string
        directory to walk (default '.')

    Returns
    =======
    list of matching paths (each joined with the directory it was found in)
    """
    # Default only when no directory was given.  (The original tested
    # ``cwd is not None`` which discarded any caller-supplied directory
    # and always walked '.'.)
    if cwd is None:
        cwd = '.'
    globbed = []
    for root, _dirs, filenames in os.walk(cwd):
        for fn in filenames:
            if fnmatch.fnmatch(fn, filename_glob):
                globbed.append(os.path.join(root, fn))
    return globbed
def get_abspath(path, cwd=None):
    """
    Return *path* as an absolute path; relative paths are resolved
    against *cwd* (default: the current directory).
    """
    # Absolute paths pass through untouched.
    if os.path.isabs(path):
        return path
    base = cwd or '.'
    if not os.path.isabs(base):
        base = os.path.abspath(base)
    return os.path.abspath(os.path.join(base, path))
def make_dirs(path, logger=None):
    """
    Create directory *path*, recursively creating missing parents
    (similar to ``mkdir -p``); a no-op when *path* already exists as a
    directory.
    """
    # A trailing slash would make dirname() a no-op; strip it first.
    trimmed = path[:-1] if path[-1] == '/' else path
    parent = os.path.dirname(trimmed)
    if parent and not os.path.exists(parent):
        make_dirs(parent, logger=logger)
    if os.path.exists(path):
        # Already present -- must be a directory, not a file.
        assert os.path.isdir(path)
    else:
        if logger:
            logger.info("Making dir: "+path)
        os.mkdir(path, 0o777)
def copy(src, dst, only_update=False, copystat=True, cwd=None,
         dest_is_dir=False, create_dest_dirs=False, logger=None):
    """
    Augmented shutil.copy with extra options and slightly
    modified behaviour

    Parameters
    ==========
    src: string
        path to source file
    dst: string
        path to destination (file name or directory)
    only_update: bool
        only copy if source is newer than destination
        (returns None otherwise), default: False
    copystat: bool
        See shutil.copystat. default: True
    cwd: string
        Path to working directory (root of relative paths)
    dest_is_dir: bool
        ensures that dst is treated as a directory. default: False
    create_dest_dirs: bool
        creates directories if needed.
    logger: logging.Logger
        debug level info emitted. Passed onto make_dirs.

    Returns
    =======
    Path to the copied file (None if skipped due to only_update).
    """
    # Handle virtual working directory
    if cwd:
        if not os.path.isabs(src):
            src = os.path.join(cwd, src)
        if not os.path.isabs(dst):
            dst = os.path.join(cwd, dst)
    # Make sure source file exists
    if not os.path.exists(src):
        # Source needs to exist
        msg = "Source: `{}` does not exist".format(src)
        raise FileNotFoundError(msg)
    # We accept both (re)naming destination file _or_
    # passing a (possibly non-existent) destination directory
    if dest_is_dir:
        if not dst[-1] == '/':
            dst = dst+'/'
    else:
        # An existing directory given as dst implies dest_is_dir.
        if os.path.exists(dst) and os.path.isdir(dst):
            dest_is_dir = True
    if dest_is_dir:
        dest_dir = dst
        dest_fname = os.path.basename(src)
        dst = os.path.join(dest_dir, dest_fname)
    else:
        dest_dir = os.path.dirname(dst)
        dest_fname = os.path.basename(dst)
    if not os.path.exists(dest_dir):
        if create_dest_dirs:
            make_dirs(dest_dir, logger=logger)
        else:
            msg = "You must create directory first."
            raise FileNotFoundError(msg)
    if only_update:
        # Skip the copy when the destination is already up to date.
        if not missing_or_other_newer(dst, src):
            if logger:
                logger.debug(
                    "Did not copy {} to {} (source not newer)".format(
                        src, dst))
            return
    if os.path.islink(dst):
        # NOTE(review): when dst is a symlink nothing is copied at all;
        # presumably intended to avoid clobbering a link back to src --
        # TODO confirm.
        if os.path.abspath(os.path.realpath(dst)) == \
           os.path.abspath(dst):
            pass # destination is a symlink pointing to src
    else:
        if logger:
            logger.debug("Copying {} to {}".format(src, dst))
        shutil.copy(src, dst)
        if copystat:
            shutil.copystat(src, dst)
    return dst
def md5_of_file(path, nblocks=128):
    """
    Compute the md5 hash of the file at *path*, reading it in chunks of
    ``nblocks`` hash blocks at a time.

    Parameters
    ==========
    path: string
        path to file to compute hash of

    Returns
    =======
    hashlib md5 hash object. Use .digest() or .hexdigest()
    on returned object to get binary or hex encoded string.
    """
    hasher = md5()
    chunk_size = nblocks * hasher.block_size
    with open(path, 'rb') as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher
def md5_of_string(string):
    """Return an md5 hash object for *string* (a bytes object)."""
    # hashlib.md5 accepts the initial data directly, equivalent to
    # constructing an empty hash and calling update().
    return md5(string)
def missing_or_other_newer(path, other_path, cwd=None):
    """
    Investigate if path is non-existent or older than the provided
    reference path.

    Parameters
    ==========
    path: string
        path which might be missing or too old
    other_path: string
        reference path
    cwd: string
        working directory (root of relative paths)

    Returns
    =======
    True if path is older or missing.
    """
    cwd = cwd or '.'
    abs_path = get_abspath(path, cwd=cwd)
    abs_other = get_abspath(other_path, cwd=cwd)
    if not os.path.exists(abs_path):
        return True
    # The 1e-6 fudge is needed because of mtime granularity, see
    # http://stackoverflow.com/questions/17086426/
    return os.path.getmtime(abs_other) - 1e-6 >= os.path.getmtime(abs_path)
class HasMetaData(object):
    """
    Provides convenience classmethods for a class to pickle some metadata.

    The metadata is kept as a pickled dict stored in a (hidden) file
    inside a chosen directory.
    """
    metadata_filename = '.metadata'

    @classmethod
    def _get_metadata_key(cls, kw):
        """ kw could be e.g. 'compiler' """
        return cls.__name__+'_'+kw

    @classmethod
    def get_from_metadata_file(cls, dirpath, key):
        """
        Get value of key in metadata file dict.

        Raises FileNotFoundError if the metadata file is absent and
        KeyError if the key is not stored in it.
        """
        fullpath = os.path.join(dirpath, cls.metadata_filename)
        if os.path.exists(fullpath):
            # Context manager closes the handle deterministically (the
            # original `pickle.load(open(...))` leaked it).
            with open(fullpath, 'rb') as fh:
                d = pickle.load(fh)
            return d[key]
        else:
            raise FileNotFoundError(
                "No such file: {0}".format(fullpath))

    @classmethod
    def save_to_metadata_file(cls, dirpath, key, value):
        """
        Store `key: value` in metadata file dict.
        """
        fullpath = os.path.join(dirpath, cls.metadata_filename)
        if os.path.exists(fullpath):
            # Merge with the existing dict rather than clobbering it.
            with open(fullpath, 'rb') as fh:
                d = pickle.load(fh)
            d.update({key: value})
        else:
            d = {key: value}
        with open(fullpath, 'wb') as fh:
            pickle.dump(d, fh)
def MetaReaderWriter(filename):
    """Return a ``HasMetaData`` instance whose metadata is stored in
    the file named *filename*."""
    # Build the subclass dynamically instead of via a class statement.
    reader_writer_cls = type('ReaderWriter', (HasMetaData,),
                             {'metadata_filename': filename})
    return reader_writer_cls()
def import_module_from_file(filename, only_if_newer_than=None):
    """
    Imports (cython generated) shared object file (.so)

    Provide a list of paths in `only_if_newer_than` to check
    timestamps of dependencies. import_ raises an ImportError
    if any is newer.

    Word of warning: Python's caching or the OS caching (unclear to author)
    is horrible for reimporting same path of an .so file. It will
    not detect the new time stamp nor new checksum but will use old
    module.
    Use unique names for this reason.

    Parameters
    ==========
    filename: string
        path to shared object
    only_if_newer_than: iterable of strings
        paths to dependencies of the shared object

    Raises
    ======
    ImportError if any of the files specified in only_if_newer_than are newer
    than the file given by filename.
    """
    # NOTE(review): the ``imp`` module is deprecated and removed in
    # Python 3.12; ``importlib`` is the replacement -- TODO migrate.
    import imp
    path, name = os.path.split(filename)
    name, ext = os.path.splitext(name)
    # Drop ABI tags from the module name, e.g. "mod.cpython-38" -> "mod"
    # (presumably -- TODO confirm against actual .so naming used).
    name = name.split('.')[0]
    fobj, filename, data = imp.find_module(name, [path])
    if only_if_newer_than:
        for dep in only_if_newer_than:
            if os.path.getmtime(filename) < os.path.getmtime(dep):
                raise ImportError("{} is newer than {}".format(dep, filename))
    mod = imp.load_module(name, fobj, filename, data)
    return mod
def find_binary_of_command(candidates):
    """
    Calls `find_executable` from distutils for
    provided candidates and returns the first hit as a
    (candidate, absolute_path) tuple.
    If no candidate matches, a RuntimeError is raised.
    """
    # NOTE(review): distutils is deprecated (removed in Python 3.12);
    # shutil.which is the modern stdlib replacement -- TODO migrate.
    from distutils.spawn import find_executable
    for c in candidates:
        binary_path = find_executable(c)
        if c and binary_path:
            return c, binary_path
    raise RuntimeError('No binary located for candidates: {}'.format(
        candidates))
def pyx_is_cplus(path):
    """
    Inspect a Cython source file (.pyx) and look for a comment line like:

    # distutils: language = c++

    Returns True if such a line is present in the file, else False.
    """
    # Use a context manager so the file handle is closed (the original
    # iterated over a bare open() and leaked it).
    with open(path, 'rt') as fh:
        for line in fh:
            if line.startswith('#') and '=' in line:
                splitted = line.split('=')
                if len(splitted) != 2:
                    continue
                lhs, rhs = splitted
                lhs_words = lhs.strip().split()
                rhs_words = rhs.strip().split()
                # Guard against an empty side, e.g. a line "#=" (the
                # original raised IndexError on such input).
                if not lhs_words or not rhs_words:
                    continue
                if lhs_words[-1].lower() == 'language' and \
                   rhs_words[0].lower() == 'c++':
                    return True
    return False
def uniquify(l):
    """
    Return a new list with duplicates removed, keeping the order of
    first occurrence.  Items need not be hashable (membership is tested
    against the result list).
    """
    deduped = []
    for item in l:
        if item in deduped:
            continue
        deduped.append(item)
    return deduped
|
bjodah/pycompilation | pycompilation/util.py | copy | python | def copy(src, dst, only_update=False, copystat=True, cwd=None,
dest_is_dir=False, create_dest_dirs=False, logger=None):
# Handle virtual working directory
if cwd:
if not os.path.isabs(src):
src = os.path.join(cwd, src)
if not os.path.isabs(dst):
dst = os.path.join(cwd, dst)
# Make sure source file extists
if not os.path.exists(src):
# Source needs to exist
msg = "Source: `{}` does not exist".format(src)
raise FileNotFoundError(msg)
# We accept both (re)naming destination file _or_
# passing a (possible non-existant) destination directory
if dest_is_dir:
if not dst[-1] == '/':
dst = dst+'/'
else:
if os.path.exists(dst) and os.path.isdir(dst):
dest_is_dir = True
if dest_is_dir:
dest_dir = dst
dest_fname = os.path.basename(src)
dst = os.path.join(dest_dir, dest_fname)
else:
dest_dir = os.path.dirname(dst)
dest_fname = os.path.basename(dst)
if not os.path.exists(dest_dir):
if create_dest_dirs:
make_dirs(dest_dir, logger=logger)
else:
msg = "You must create directory first."
raise FileNotFoundError(msg)
if only_update:
if not missing_or_other_newer(dst, src):
if logger:
logger.debug(
"Did not copy {} to {} (source not newer)".format(
src, dst))
return
if os.path.islink(dst):
if os.path.abspath(os.path.realpath(dst)) == \
os.path.abspath(dst):
pass # destination is a symlic pointing to src
else:
if logger:
logger.debug("Copying {} to {}".format(src, dst))
shutil.copy(src, dst)
if copystat:
shutil.copystat(src, dst)
return dst | Augmented shutil.copy with extra options and slightly
modified behaviour
Parameters
==========
src: string
path to source file
dst: string
path to destingation
only_update: bool
only copy if source is newer than destination
(returns None if it was newer), default: False
copystat: bool
See shutil.copystat. default: True
cwd: string
Path to working directory (root of relative paths)
dest_is_dir: bool
ensures that dst is treated as a directory. default: False
create_dest_dirs: bool
creates directories if needed.
logger: logging.Looger
debug level info emitted. Passed onto make_dirs.
Returns
=======
Path to the copied file. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L92-L178 | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import fnmatch
import os
import pickle
import shutil
from collections import namedtuple
from hashlib import md5
class CompilationError(Exception):
pass
class FileNotFoundError(Exception):
pass
def expand_collection_in_dict(d, key, new_items, no_duplicates=True):
    """
    Insert or extend the collection stored under *key* in dict *d*.

    Parameters
    ==========
    d: dict
        dict in which a key will be inserted/expanded
    key: hashable
        key in d
    new_items: iterable
        d[key] will be extended with items in new_items
    no_duplicates: bool
        avoid inserting duplicates in d[key] (default: True)
    """
    if key not in d:
        d[key] = new_items
        return
    if no_duplicates:
        # Materialize as a list: under Python 3 ``filter`` is lazy and the
        # original code's ``map(...)`` side-effect calls were never
        # evaluated, so nothing was actually added for set/list values.
        new_items = [x for x in new_items if x not in d[key]]
    if isinstance(d[key], set):
        d[key].update(new_items)
    elif isinstance(d[key], list):
        d[key].extend(new_items)
    else:
        # Fall back to concatenation for other sequence types.
        d[key] = d[key] + new_items
Glob = namedtuple('Glob', 'pathname')
ArbitraryDepthGlob = namedtuple('ArbitraryDepthGlob', 'filename')
def glob_at_depth(filename_glob, cwd=None):
if cwd is not None:
cwd = '.'
globbed = []
for root, dirs, filenames in os.walk(cwd):
for fn in filenames:
if fnmatch.fnmatch(fn, filename_glob):
globbed.append(os.path.join(root, fn))
return globbed
def get_abspath(path, cwd=None):
if os.path.isabs(path):
return path
else:
cwd = cwd or '.'
if not os.path.isabs(cwd):
cwd = os.path.abspath(cwd)
return os.path.abspath(
os.path.join(cwd, path)
)
def make_dirs(path, logger=None):
if path[-1] == '/':
parent = os.path.dirname(path[:-1])
else:
parent = os.path.dirname(path)
if len(parent) > 0:
if not os.path.exists(parent):
make_dirs(parent, logger=logger)
if not os.path.exists(path):
if logger:
logger.info("Making dir: "+path)
os.mkdir(path, 0o777)
else:
assert os.path.isdir(path)
def md5_of_file(path, nblocks=128):
"""
Computes the md5 hash of a file.
Parameters
==========
path: string
path to file to compute hash of
Returns
=======
hashlib md5 hash object. Use .digest() or .hexdigest()
on returned object to get binary or hex encoded string.
"""
md = md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(nblocks*md.block_size), b''):
md.update(chunk)
return md
def md5_of_string(string):
md = md5()
md.update(string)
return md
def missing_or_other_newer(path, other_path, cwd=None):
"""
Investigate if path is non-existant or older than provided reference
path.
Parameters
==========
path: string
path to path which might be missing or too old
other_path: string
reference path
cwd: string
working directory (root of relative paths)
Returns
=======
True if path is older or missing.
"""
cwd = cwd or '.'
path = get_abspath(path, cwd=cwd)
other_path = get_abspath(other_path, cwd=cwd)
if not os.path.exists(path):
return True
if os.path.getmtime(other_path) - 1e-6 >= os.path.getmtime(path):
# 1e-6 is needed beacuse http://stackoverflow.com/questions/17086426/
return True
return False
class HasMetaData(object):
"""
Provides convenice classmethods for a class to pickle some metadata.
"""
metadata_filename = '.metadata'
@classmethod
def _get_metadata_key(cls, kw):
""" kw could be e.g. 'compiler' """
return cls.__name__+'_'+kw
@classmethod
def get_from_metadata_file(cls, dirpath, key):
"""
Get value of key in metadata file dict.
"""
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
return d[key]
else:
raise FileNotFoundError(
"No such file: {0}".format(fullpath))
@classmethod
def save_to_metadata_file(cls, dirpath, key, value):
"""
Store `key: value` in metadata file dict.
"""
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
d.update({key: value})
pickle.dump(d, open(fullpath, 'wb'))
else:
pickle.dump({key: value}, open(fullpath, 'wb'))
def MetaReaderWriter(filename):
class ReaderWriter(HasMetaData):
metadata_filename = filename
return ReaderWriter()
def import_module_from_file(filename, only_if_newer_than=None):
"""
Imports (cython generated) shared object file (.so)
Provide a list of paths in `only_if_newer_than` to check
timestamps of dependencies. import_ raises an ImportError
if any is newer.
Word of warning: Python's caching or the OS caching (unclear to author)
is horrible for reimporting same path of an .so file. It will
not detect the new time stamp nor new checksum but will use old
module.
Use unique names for this reason.
Parameters
==========
filename: string
path to shared object
only_if_newer_than: iterable of strings
paths to dependencies of the shared object
Raises
======
ImportError if any of the files specified in only_if_newer_than are newer
than the file given by filename.
"""
import imp
path, name = os.path.split(filename)
name, ext = os.path.splitext(name)
name = name.split('.')[0]
fobj, filename, data = imp.find_module(name, [path])
if only_if_newer_than:
for dep in only_if_newer_than:
if os.path.getmtime(filename) < os.path.getmtime(dep):
raise ImportError("{} is newer than {}".format(dep, filename))
mod = imp.load_module(name, fobj, filename, data)
return mod
def find_binary_of_command(candidates):
"""
Calls `find_executable` from distuils for
provided candidates and returns first hit.
If no candidate mathces, a RuntimeError is raised
"""
from distutils.spawn import find_executable
for c in candidates:
binary_path = find_executable(c)
if c and binary_path:
return c, binary_path
raise RuntimeError('No binary located for candidates: {}'.format(
candidates))
def pyx_is_cplus(path):
"""
Inspect a Cython source file (.pyx) and look for comment line like:
# distutils: language = c++
Returns True if such a file is present in the file, else False.
"""
for line in open(path, 'rt'):
if line.startswith('#') and '=' in line:
splitted = line.split('=')
if len(splitted) != 2:
continue
lhs, rhs = splitted
if lhs.strip().split()[-1].lower() == 'language' and \
rhs.strip().split()[0].lower() == 'c++':
return True
return False
def uniquify(l):
"""
Uniquify a list (skip duplicate items).
"""
result = []
for x in l:
if x not in result:
result.append(x)
return result
|
bjodah/pycompilation | pycompilation/util.py | md5_of_file | python | def md5_of_file(path, nblocks=128):
md = md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(nblocks*md.block_size), b''):
md.update(chunk)
return md | Computes the md5 hash of a file.
Parameters
==========
path: string
path to file to compute hash of
Returns
=======
hashlib md5 hash object. Use .digest() or .hexdigest()
on returned object to get binary or hex encoded string. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L181-L199 | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import fnmatch
import os
import pickle
import shutil
from collections import namedtuple
from hashlib import md5
class CompilationError(Exception):
pass
class FileNotFoundError(Exception):
pass
def expand_collection_in_dict(d, key, new_items, no_duplicates=True):
"""
Parameters
d: dict
dict in which a key will be inserted/expanded
key: hashable
key in d
new_items: iterable
d[key] will be extended with items in new_items
no_duplicates: bool
avoid inserting duplicates in d[key] (default: True)
"""
if key in d:
if no_duplicates:
new_items = filter(lambda x: x not in d[key], new_items)
if isinstance(d[key], set):
map(d[key].add, new_items)
elif isinstance(d[key], list):
map(d[key].append, new_items)
else:
d[key] = d[key] + new_items
else:
d[key] = new_items
Glob = namedtuple('Glob', 'pathname')
ArbitraryDepthGlob = namedtuple('ArbitraryDepthGlob', 'filename')
def glob_at_depth(filename_glob, cwd=None):
if cwd is not None:
cwd = '.'
globbed = []
for root, dirs, filenames in os.walk(cwd):
for fn in filenames:
if fnmatch.fnmatch(fn, filename_glob):
globbed.append(os.path.join(root, fn))
return globbed
def get_abspath(path, cwd=None):
if os.path.isabs(path):
return path
else:
cwd = cwd or '.'
if not os.path.isabs(cwd):
cwd = os.path.abspath(cwd)
return os.path.abspath(
os.path.join(cwd, path)
)
def make_dirs(path, logger=None):
if path[-1] == '/':
parent = os.path.dirname(path[:-1])
else:
parent = os.path.dirname(path)
if len(parent) > 0:
if not os.path.exists(parent):
make_dirs(parent, logger=logger)
if not os.path.exists(path):
if logger:
logger.info("Making dir: "+path)
os.mkdir(path, 0o777)
else:
assert os.path.isdir(path)
def copy(src, dst, only_update=False, copystat=True, cwd=None,
dest_is_dir=False, create_dest_dirs=False, logger=None):
"""
Augmented shutil.copy with extra options and slightly
modified behaviour
Parameters
==========
src: string
path to source file
dst: string
path to destingation
only_update: bool
only copy if source is newer than destination
(returns None if it was newer), default: False
copystat: bool
See shutil.copystat. default: True
cwd: string
Path to working directory (root of relative paths)
dest_is_dir: bool
ensures that dst is treated as a directory. default: False
create_dest_dirs: bool
creates directories if needed.
logger: logging.Looger
debug level info emitted. Passed onto make_dirs.
Returns
=======
Path to the copied file.
"""
# Handle virtual working directory
if cwd:
if not os.path.isabs(src):
src = os.path.join(cwd, src)
if not os.path.isabs(dst):
dst = os.path.join(cwd, dst)
# Make sure source file extists
if not os.path.exists(src):
# Source needs to exist
msg = "Source: `{}` does not exist".format(src)
raise FileNotFoundError(msg)
# We accept both (re)naming destination file _or_
# passing a (possible non-existant) destination directory
if dest_is_dir:
if not dst[-1] == '/':
dst = dst+'/'
else:
if os.path.exists(dst) and os.path.isdir(dst):
dest_is_dir = True
if dest_is_dir:
dest_dir = dst
dest_fname = os.path.basename(src)
dst = os.path.join(dest_dir, dest_fname)
else:
dest_dir = os.path.dirname(dst)
dest_fname = os.path.basename(dst)
if not os.path.exists(dest_dir):
if create_dest_dirs:
make_dirs(dest_dir, logger=logger)
else:
msg = "You must create directory first."
raise FileNotFoundError(msg)
if only_update:
if not missing_or_other_newer(dst, src):
if logger:
logger.debug(
"Did not copy {} to {} (source not newer)".format(
src, dst))
return
if os.path.islink(dst):
if os.path.abspath(os.path.realpath(dst)) == \
os.path.abspath(dst):
pass # destination is a symlic pointing to src
else:
if logger:
logger.debug("Copying {} to {}".format(src, dst))
shutil.copy(src, dst)
if copystat:
shutil.copystat(src, dst)
return dst
def md5_of_string(string):
md = md5()
md.update(string)
return md
def missing_or_other_newer(path, other_path, cwd=None):
"""
Investigate if path is non-existant or older than provided reference
path.
Parameters
==========
path: string
path to path which might be missing or too old
other_path: string
reference path
cwd: string
working directory (root of relative paths)
Returns
=======
True if path is older or missing.
"""
cwd = cwd or '.'
path = get_abspath(path, cwd=cwd)
other_path = get_abspath(other_path, cwd=cwd)
if not os.path.exists(path):
return True
if os.path.getmtime(other_path) - 1e-6 >= os.path.getmtime(path):
# 1e-6 is needed beacuse http://stackoverflow.com/questions/17086426/
return True
return False
class HasMetaData(object):
    """
    Mixin providing classmethods to persist a small metadata dict
    (pickled) in a file inside a given directory.
    """
    # Name of the pickle file written into the target directory.
    metadata_filename = '.metadata'

    @classmethod
    def _get_metadata_key(cls, kw):
        """Namespace *kw* with the class name; kw could be e.g. 'compiler'."""
        return cls.__name__+'_'+kw

    @classmethod
    def get_from_metadata_file(cls, dirpath, key):
        """
        Get value of key in metadata file dict.

        Raises FileNotFoundError if no metadata file exists in *dirpath*.
        """
        fullpath = os.path.join(dirpath, cls.metadata_filename)
        if os.path.exists(fullpath):
            # Context manager closes the handle promptly (the original
            # left pickle.load(open(...)) handles open until GC).
            with open(fullpath, 'rb') as fh:
                d = pickle.load(fh)
            return d[key]
        else:
            raise FileNotFoundError(
                "No such file: {0}".format(fullpath))

    @classmethod
    def save_to_metadata_file(cls, dirpath, key, value):
        """
        Store `key: value` in metadata file dict (creating it if needed).
        """
        fullpath = os.path.join(dirpath, cls.metadata_filename)
        if os.path.exists(fullpath):
            # Read-modify-write of the existing dict.
            with open(fullpath, 'rb') as fh:
                d = pickle.load(fh)
            d.update({key: value})
            with open(fullpath, 'wb') as fh:
                pickle.dump(d, fh)
        else:
            with open(fullpath, 'wb') as fh:
                pickle.dump({key: value}, fh)
def MetaReaderWriter(filename):
    """Return a HasMetaData instance using *filename* as its metadata file."""
    cls = type('ReaderWriter', (HasMetaData,),
               {'metadata_filename': filename})
    return cls()
def import_module_from_file(filename, only_if_newer_than=None):
    """
    Imports (cython generated) shared object file (.so).

    Provide a list of paths in `only_if_newer_than` to check
    timestamps of dependencies. import_ raises an ImportError
    if any is newer.

    Word of warning: Python's caching or the OS caching (unclear to author)
    is horrible for reimporting same path of an .so file. It will
    not detect the new time stamp nor new checksum but will use old
    module. Use unique names for this reason.

    Parameters
    ==========
    filename: string
        path to shared object
    only_if_newer_than: iterable of strings
        paths to dependencies of the shared object

    Raises
    ======
    ImportError if any of the files specified in only_if_newer_than are newer
    than the file given by filename.
    """
    import imp  # deprecated/removed in modern Python; kept for compatibility
    path, name = os.path.split(filename)
    name, ext = os.path.splitext(name)
    name = name.split('.')[0]  # strip ABI tags like '.cpython-37m-x86_64'
    fobj, filename, data = imp.find_module(name, [path])
    try:
        if only_if_newer_than:
            for dep in only_if_newer_than:
                if os.path.getmtime(filename) < os.path.getmtime(dep):
                    raise ImportError(
                        "{} is newer than {}".format(dep, filename))
        mod = imp.load_module(name, fobj, filename, data)
    finally:
        # imp.find_module returns an open file object which the caller is
        # responsible for closing (None for packages) -- the original
        # leaked it, also on the ImportError path above.
        if fobj is not None:
            fobj.close()
    return mod
def find_binary_of_command(candidates):
    """
    Calls `find_executable` from distutils for the provided candidates
    and returns the first hit as a (candidate, path) tuple.

    If no candidate matches, a RuntimeError is raised.
    """
    from distutils.spawn import find_executable
    for candidate in candidates:
        located = find_executable(candidate)
        if candidate and located:
            return candidate, located
    raise RuntimeError('No binary located for candidates: {}'.format(
        candidates))
def pyx_is_cplus(path):
    """
    Inspect a Cython source file (.pyx) and look for a comment line like:

        # distutils: language = c++

    Returns True if such a line is present in the file, else False.
    """
    # Context manager closes the handle (original leaked the open file).
    with open(path, 'rt') as fh:
        for line in fh:
            if line.startswith('#') and '=' in line:
                splitted = line.split('=')
                if len(splitted) != 2:
                    continue
                lhs, rhs = splitted
                lhs_tokens = lhs.strip().split()
                rhs_tokens = rhs.strip().split()
                # Guard against an empty side (e.g. "# language =") which
                # made the original raise IndexError.
                if lhs_tokens and rhs_tokens and \
                   lhs_tokens[-1].lower() == 'language' and \
                   rhs_tokens[0].lower() == 'c++':
                    return True
    return False
def uniquify(l):
    """Return a copy of *l* with duplicates removed, preserving order."""
    # Membership is tested with `in` (equality with identity shortcut),
    # so unhashable items are supported, same as the original.
    unique = []
    for item in l:
        if item not in unique:
            unique.append(item)
    return unique
|
bjodah/pycompilation | pycompilation/util.py | missing_or_other_newer | python | def missing_or_other_newer(path, other_path, cwd=None):
cwd = cwd or '.'
path = get_abspath(path, cwd=cwd)
other_path = get_abspath(other_path, cwd=cwd)
if not os.path.exists(path):
return True
if os.path.getmtime(other_path) - 1e-6 >= os.path.getmtime(path):
# 1e-6 is needed beacuse http://stackoverflow.com/questions/17086426/
return True
return False | Investigate if path is non-existant or older than provided reference
path.
Parameters
==========
path: string
path to path which might be missing or too old
other_path: string
reference path
cwd: string
working directory (root of relative paths)
Returns
=======
True if path is older or missing. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L208-L234 | [
"def get_abspath(path, cwd=None):\n if os.path.isabs(path):\n return path\n else:\n cwd = cwd or '.'\n if not os.path.isabs(cwd):\n cwd = os.path.abspath(cwd)\n return os.path.abspath(\n os.path.join(cwd, path)\n )\n"
] | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import fnmatch
import os
import pickle
import shutil
from collections import namedtuple
from hashlib import md5
class CompilationError(Exception):
pass
class FileNotFoundError(Exception):
pass
def expand_collection_in_dict(d, key, new_items, no_duplicates=True):
    """
    Insert or extend ``d[key]`` with *new_items* (in place).

    Parameters
    d: dict
        dict in which a key will be inserted/expanded
    key: hashable
        key in d
    new_items: iterable
        d[key] will be extended with items in new_items
    no_duplicates: bool
        avoid inserting duplicates in d[key] (default: True)
    """
    if key in d:
        if no_duplicates:
            # Materialize as a list: filter() is lazy in Python 3.
            new_items = [x for x in new_items if x not in d[key]]
        if isinstance(d[key], set):
            # Bug fix: the original used map(d[key].add, new_items),
            # which is a lazy iterator in Python 3 and never executed.
            for item in new_items:
                d[key].add(item)
        elif isinstance(d[key], list):
            # Same laziness bug applied to map(d[key].append, ...).
            for item in new_items:
                d[key].append(item)
        else:
            d[key] = d[key] + new_items
    else:
        d[key] = new_items
Glob = namedtuple('Glob', 'pathname')
ArbitraryDepthGlob = namedtuple('ArbitraryDepthGlob', 'filename')
def glob_at_depth(filename_glob, cwd=None):
    """
    Recursively collect paths below *cwd* whose basename matches
    *filename_glob* (fnmatch-style pattern), at arbitrary depth.

    Parameters
    ==========
    filename_glob: string
        fnmatch pattern matched against file names (not full paths)
    cwd: string
        root directory of the walk; defaults to '.' when None

    Returns
    =======
    list of matching paths (joined with their containing directory).
    """
    # Bug fix: the original tested `if cwd is not None`, which clobbered
    # any provided cwd with '.' and passed None to os.walk otherwise.
    if cwd is None:
        cwd = '.'
    globbed = []
    for root, dirs, filenames in os.walk(cwd):
        for fn in filenames:
            if fnmatch.fnmatch(fn, filename_glob):
                globbed.append(os.path.join(root, fn))
    return globbed
def get_abspath(path, cwd=None):
    """Return *path* as an absolute path.

    An already-absolute *path* is returned unchanged; a relative one is
    resolved against *cwd* (itself made absolute first, default '.').
    """
    if os.path.isabs(path):
        return path
    base = cwd or '.'
    if not os.path.isabs(base):
        base = os.path.abspath(base)
    return os.path.abspath(os.path.join(base, path))
def make_dirs(path, logger=None):
    """
    Recursively create *path* and any missing parent directories.

    Parameters
    ==========
    path: string
        directory path to create (a single trailing '/' is tolerated)
    logger: logging.Logger
        optional; an info-level message is emitted per directory created

    Raises AssertionError if *path* already exists but is not a directory.
    """
    # Determine the parent, ignoring one trailing slash.
    if path[-1] == '/':
        parent = os.path.dirname(path[:-1])
    else:
        parent = os.path.dirname(path)
    # Ensure the parent chain exists first (recursion bottoms out at '').
    if len(parent) > 0:
        if not os.path.exists(parent):
            make_dirs(parent, logger=logger)
    if not os.path.exists(path):
        if logger:
            logger.info("Making dir: "+path)
        # NOTE: the effective mode of 0o777 is reduced by the process umask.
        os.mkdir(path, 0o777)
    else:
        # Already present: must be a directory, not a regular file.
        assert os.path.isdir(path)
def copy(src, dst, only_update=False, copystat=True, cwd=None,
         dest_is_dir=False, create_dest_dirs=False, logger=None):
    """
    Augmented shutil.copy with extra options and slightly
    modified behaviour.

    Parameters
    ==========
    src: string
        path to source file
    dst: string
        path to destination (file name or directory)
    only_update: bool
        only copy if source is newer than destination
        (returns None if it was not newer), default: False
    copystat: bool
        See shutil.copystat. default: True
    cwd: string
        Path to working directory (root of relative paths)
    dest_is_dir: bool
        ensures that dst is treated as a directory. default: False
    create_dest_dirs: bool
        creates directories if needed.
    logger: logging.Logger
        debug level info emitted. Passed onto make_dirs.

    Returns
    =======
    Path to the copied file (None when skipped due to only_update).
    """
    # Resolve relative paths against the virtual working directory.
    if cwd:
        if not os.path.isabs(src):
            src = os.path.join(cwd, src)
        if not os.path.isabs(dst):
            dst = os.path.join(cwd, dst)
    # The source must exist.
    if not os.path.exists(src):
        msg = "Source: `{}` does not exist".format(src)
        raise FileNotFoundError(msg)
    # We accept both (re)naming the destination file _or_ passing a
    # (possibly non-existent) destination directory.
    if dest_is_dir:
        if not dst[-1] == '/':
            dst = dst+'/'
    else:
        if os.path.exists(dst) and os.path.isdir(dst):
            dest_is_dir = True
    if dest_is_dir:
        # Destination directory: keep the source's base name.
        dest_dir = dst
        dest_fname = os.path.basename(src)
        dst = os.path.join(dest_dir, dest_fname)
    else:
        dest_dir = os.path.dirname(dst)
        dest_fname = os.path.basename(dst)
        if not os.path.exists(dest_dir):
            if create_dest_dirs:
                make_dirs(dest_dir, logger=logger)
            else:
                msg = "You must create directory first."
                raise FileNotFoundError(msg)
    if only_update:
        # Skip (and return None) unless the source is newer than dst.
        if not missing_or_other_newer(dst, src):
            if logger:
                logger.debug(
                    "Did not copy {} to {} (source not newer)".format(
                        src, dst))
            return
    if os.path.islink(dst):
        if os.path.abspath(os.path.realpath(dst)) == \
                os.path.abspath(dst):
            pass  # per original: destination symlink already points here
        # NOTE(review): when dst is a symlink that does NOT satisfy the
        # comparison above, nothing is copied and dst is returned
        # unchanged -- this looks like a missing else-branch; confirm.
    else:
        if logger:
            logger.debug("Copying {} to {}".format(src, dst))
        shutil.copy(src, dst)
        if copystat:
            # Preserve permission bits and timestamps as well.
            shutil.copystat(src, dst)
    return dst
def md5_of_file(path, nblocks=128):
    """
    Computes the md5 hash of a file, reading it in chunks.

    Parameters
    ==========
    path: string
        path to file to compute hash of
    nblocks: int
        number of hash blocks read per chunk (default: 128)

    Returns
    =======
    hashlib md5 hash object. Use .digest() or .hexdigest()
    on returned object to get binary or hex encoded string.
    """
    digest = md5()
    chunk_size = nblocks * digest.block_size
    with open(path, 'rb') as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest
def md5_of_string(string):
md = md5()
md.update(string)
return md
class HasMetaData(object):
"""
Provides convenice classmethods for a class to pickle some metadata.
"""
metadata_filename = '.metadata'
@classmethod
def _get_metadata_key(cls, kw):
""" kw could be e.g. 'compiler' """
return cls.__name__+'_'+kw
@classmethod
def get_from_metadata_file(cls, dirpath, key):
"""
Get value of key in metadata file dict.
"""
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
return d[key]
else:
raise FileNotFoundError(
"No such file: {0}".format(fullpath))
@classmethod
def save_to_metadata_file(cls, dirpath, key, value):
"""
Store `key: value` in metadata file dict.
"""
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
d.update({key: value})
pickle.dump(d, open(fullpath, 'wb'))
else:
pickle.dump({key: value}, open(fullpath, 'wb'))
def MetaReaderWriter(filename):
class ReaderWriter(HasMetaData):
metadata_filename = filename
return ReaderWriter()
def import_module_from_file(filename, only_if_newer_than=None):
"""
Imports (cython generated) shared object file (.so)
Provide a list of paths in `only_if_newer_than` to check
timestamps of dependencies. import_ raises an ImportError
if any is newer.
Word of warning: Python's caching or the OS caching (unclear to author)
is horrible for reimporting same path of an .so file. It will
not detect the new time stamp nor new checksum but will use old
module.
Use unique names for this reason.
Parameters
==========
filename: string
path to shared object
only_if_newer_than: iterable of strings
paths to dependencies of the shared object
Raises
======
ImportError if any of the files specified in only_if_newer_than are newer
than the file given by filename.
"""
import imp
path, name = os.path.split(filename)
name, ext = os.path.splitext(name)
name = name.split('.')[0]
fobj, filename, data = imp.find_module(name, [path])
if only_if_newer_than:
for dep in only_if_newer_than:
if os.path.getmtime(filename) < os.path.getmtime(dep):
raise ImportError("{} is newer than {}".format(dep, filename))
mod = imp.load_module(name, fobj, filename, data)
return mod
def find_binary_of_command(candidates):
"""
Calls `find_executable` from distuils for
provided candidates and returns first hit.
If no candidate mathces, a RuntimeError is raised
"""
from distutils.spawn import find_executable
for c in candidates:
binary_path = find_executable(c)
if c and binary_path:
return c, binary_path
raise RuntimeError('No binary located for candidates: {}'.format(
candidates))
def pyx_is_cplus(path):
"""
Inspect a Cython source file (.pyx) and look for comment line like:
# distutils: language = c++
Returns True if such a file is present in the file, else False.
"""
for line in open(path, 'rt'):
if line.startswith('#') and '=' in line:
splitted = line.split('=')
if len(splitted) != 2:
continue
lhs, rhs = splitted
if lhs.strip().split()[-1].lower() == 'language' and \
rhs.strip().split()[0].lower() == 'c++':
return True
return False
def uniquify(l):
"""
Uniquify a list (skip duplicate items).
"""
result = []
for x in l:
if x not in result:
result.append(x)
return result
|
bjodah/pycompilation | pycompilation/util.py | import_module_from_file | python | def import_module_from_file(filename, only_if_newer_than=None):
import imp
path, name = os.path.split(filename)
name, ext = os.path.splitext(name)
name = name.split('.')[0]
fobj, filename, data = imp.find_module(name, [path])
if only_if_newer_than:
for dep in only_if_newer_than:
if os.path.getmtime(filename) < os.path.getmtime(dep):
raise ImportError("{} is newer than {}".format(dep, filename))
mod = imp.load_module(name, fobj, filename, data)
return mod | Imports (cython generated) shared object file (.so)
Provide a list of paths in `only_if_newer_than` to check
timestamps of dependencies. import_ raises an ImportError
if any is newer.
Word of warning: Python's caching or the OS caching (unclear to author)
is horrible for reimporting same path of an .so file. It will
not detect the new time stamp nor new checksum but will use old
module.
Use unique names for this reason.
Parameters
==========
filename: string
path to shared object
only_if_newer_than: iterable of strings
paths to dependencies of the shared object
Raises
======
ImportError if any of the files specified in only_if_newer_than are newer
than the file given by filename. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L281-L318 | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import fnmatch
import os
import pickle
import shutil
from collections import namedtuple
from hashlib import md5
class CompilationError(Exception):
pass
class FileNotFoundError(Exception):
pass
def expand_collection_in_dict(d, key, new_items, no_duplicates=True):
"""
Parameters
d: dict
dict in which a key will be inserted/expanded
key: hashable
key in d
new_items: iterable
d[key] will be extended with items in new_items
no_duplicates: bool
avoid inserting duplicates in d[key] (default: True)
"""
if key in d:
if no_duplicates:
new_items = filter(lambda x: x not in d[key], new_items)
if isinstance(d[key], set):
map(d[key].add, new_items)
elif isinstance(d[key], list):
map(d[key].append, new_items)
else:
d[key] = d[key] + new_items
else:
d[key] = new_items
Glob = namedtuple('Glob', 'pathname')
ArbitraryDepthGlob = namedtuple('ArbitraryDepthGlob', 'filename')
def glob_at_depth(filename_glob, cwd=None):
if cwd is not None:
cwd = '.'
globbed = []
for root, dirs, filenames in os.walk(cwd):
for fn in filenames:
if fnmatch.fnmatch(fn, filename_glob):
globbed.append(os.path.join(root, fn))
return globbed
def get_abspath(path, cwd=None):
if os.path.isabs(path):
return path
else:
cwd = cwd or '.'
if not os.path.isabs(cwd):
cwd = os.path.abspath(cwd)
return os.path.abspath(
os.path.join(cwd, path)
)
def make_dirs(path, logger=None):
if path[-1] == '/':
parent = os.path.dirname(path[:-1])
else:
parent = os.path.dirname(path)
if len(parent) > 0:
if not os.path.exists(parent):
make_dirs(parent, logger=logger)
if not os.path.exists(path):
if logger:
logger.info("Making dir: "+path)
os.mkdir(path, 0o777)
else:
assert os.path.isdir(path)
def copy(src, dst, only_update=False, copystat=True, cwd=None,
dest_is_dir=False, create_dest_dirs=False, logger=None):
"""
Augmented shutil.copy with extra options and slightly
modified behaviour
Parameters
==========
src: string
path to source file
dst: string
path to destingation
only_update: bool
only copy if source is newer than destination
(returns None if it was newer), default: False
copystat: bool
See shutil.copystat. default: True
cwd: string
Path to working directory (root of relative paths)
dest_is_dir: bool
ensures that dst is treated as a directory. default: False
create_dest_dirs: bool
creates directories if needed.
logger: logging.Looger
debug level info emitted. Passed onto make_dirs.
Returns
=======
Path to the copied file.
"""
# Handle virtual working directory
if cwd:
if not os.path.isabs(src):
src = os.path.join(cwd, src)
if not os.path.isabs(dst):
dst = os.path.join(cwd, dst)
# Make sure source file extists
if not os.path.exists(src):
# Source needs to exist
msg = "Source: `{}` does not exist".format(src)
raise FileNotFoundError(msg)
# We accept both (re)naming destination file _or_
# passing a (possible non-existant) destination directory
if dest_is_dir:
if not dst[-1] == '/':
dst = dst+'/'
else:
if os.path.exists(dst) and os.path.isdir(dst):
dest_is_dir = True
if dest_is_dir:
dest_dir = dst
dest_fname = os.path.basename(src)
dst = os.path.join(dest_dir, dest_fname)
else:
dest_dir = os.path.dirname(dst)
dest_fname = os.path.basename(dst)
if not os.path.exists(dest_dir):
if create_dest_dirs:
make_dirs(dest_dir, logger=logger)
else:
msg = "You must create directory first."
raise FileNotFoundError(msg)
if only_update:
if not missing_or_other_newer(dst, src):
if logger:
logger.debug(
"Did not copy {} to {} (source not newer)".format(
src, dst))
return
if os.path.islink(dst):
if os.path.abspath(os.path.realpath(dst)) == \
os.path.abspath(dst):
pass # destination is a symlic pointing to src
else:
if logger:
logger.debug("Copying {} to {}".format(src, dst))
shutil.copy(src, dst)
if copystat:
shutil.copystat(src, dst)
return dst
def md5_of_file(path, nblocks=128):
"""
Computes the md5 hash of a file.
Parameters
==========
path: string
path to file to compute hash of
Returns
=======
hashlib md5 hash object. Use .digest() or .hexdigest()
on returned object to get binary or hex encoded string.
"""
md = md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(nblocks*md.block_size), b''):
md.update(chunk)
return md
def md5_of_string(string):
md = md5()
md.update(string)
return md
def missing_or_other_newer(path, other_path, cwd=None):
"""
Investigate if path is non-existant or older than provided reference
path.
Parameters
==========
path: string
path to path which might be missing or too old
other_path: string
reference path
cwd: string
working directory (root of relative paths)
Returns
=======
True if path is older or missing.
"""
cwd = cwd or '.'
path = get_abspath(path, cwd=cwd)
other_path = get_abspath(other_path, cwd=cwd)
if not os.path.exists(path):
return True
if os.path.getmtime(other_path) - 1e-6 >= os.path.getmtime(path):
# 1e-6 is needed beacuse http://stackoverflow.com/questions/17086426/
return True
return False
class HasMetaData(object):
"""
Provides convenice classmethods for a class to pickle some metadata.
"""
metadata_filename = '.metadata'
@classmethod
def _get_metadata_key(cls, kw):
""" kw could be e.g. 'compiler' """
return cls.__name__+'_'+kw
@classmethod
def get_from_metadata_file(cls, dirpath, key):
"""
Get value of key in metadata file dict.
"""
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
return d[key]
else:
raise FileNotFoundError(
"No such file: {0}".format(fullpath))
@classmethod
def save_to_metadata_file(cls, dirpath, key, value):
"""
Store `key: value` in metadata file dict.
"""
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
d.update({key: value})
pickle.dump(d, open(fullpath, 'wb'))
else:
pickle.dump({key: value}, open(fullpath, 'wb'))
def MetaReaderWriter(filename):
class ReaderWriter(HasMetaData):
metadata_filename = filename
return ReaderWriter()
def find_binary_of_command(candidates):
"""
Calls `find_executable` from distuils for
provided candidates and returns first hit.
If no candidate mathces, a RuntimeError is raised
"""
from distutils.spawn import find_executable
for c in candidates:
binary_path = find_executable(c)
if c and binary_path:
return c, binary_path
raise RuntimeError('No binary located for candidates: {}'.format(
candidates))
def pyx_is_cplus(path):
"""
Inspect a Cython source file (.pyx) and look for comment line like:
# distutils: language = c++
Returns True if such a file is present in the file, else False.
"""
for line in open(path, 'rt'):
if line.startswith('#') and '=' in line:
splitted = line.split('=')
if len(splitted) != 2:
continue
lhs, rhs = splitted
if lhs.strip().split()[-1].lower() == 'language' and \
rhs.strip().split()[0].lower() == 'c++':
return True
return False
def uniquify(l):
"""
Uniquify a list (skip duplicate items).
"""
result = []
for x in l:
if x not in result:
result.append(x)
return result
|
bjodah/pycompilation | pycompilation/util.py | find_binary_of_command | python | def find_binary_of_command(candidates):
from distutils.spawn import find_executable
for c in candidates:
binary_path = find_executable(c)
if c and binary_path:
return c, binary_path
raise RuntimeError('No binary located for candidates: {}'.format(
candidates)) | Calls `find_executable` from distuils for
provided candidates and returns first hit.
If no candidate mathces, a RuntimeError is raised | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L321-L333 | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import fnmatch
import os
import pickle
import shutil
from collections import namedtuple
from hashlib import md5
class CompilationError(Exception):
pass
class FileNotFoundError(Exception):
pass
def expand_collection_in_dict(d, key, new_items, no_duplicates=True):
"""
Parameters
d: dict
dict in which a key will be inserted/expanded
key: hashable
key in d
new_items: iterable
d[key] will be extended with items in new_items
no_duplicates: bool
avoid inserting duplicates in d[key] (default: True)
"""
if key in d:
if no_duplicates:
new_items = filter(lambda x: x not in d[key], new_items)
if isinstance(d[key], set):
map(d[key].add, new_items)
elif isinstance(d[key], list):
map(d[key].append, new_items)
else:
d[key] = d[key] + new_items
else:
d[key] = new_items
Glob = namedtuple('Glob', 'pathname')
ArbitraryDepthGlob = namedtuple('ArbitraryDepthGlob', 'filename')
def glob_at_depth(filename_glob, cwd=None):
if cwd is not None:
cwd = '.'
globbed = []
for root, dirs, filenames in os.walk(cwd):
for fn in filenames:
if fnmatch.fnmatch(fn, filename_glob):
globbed.append(os.path.join(root, fn))
return globbed
def get_abspath(path, cwd=None):
if os.path.isabs(path):
return path
else:
cwd = cwd or '.'
if not os.path.isabs(cwd):
cwd = os.path.abspath(cwd)
return os.path.abspath(
os.path.join(cwd, path)
)
def make_dirs(path, logger=None):
if path[-1] == '/':
parent = os.path.dirname(path[:-1])
else:
parent = os.path.dirname(path)
if len(parent) > 0:
if not os.path.exists(parent):
make_dirs(parent, logger=logger)
if not os.path.exists(path):
if logger:
logger.info("Making dir: "+path)
os.mkdir(path, 0o777)
else:
assert os.path.isdir(path)
def copy(src, dst, only_update=False, copystat=True, cwd=None,
dest_is_dir=False, create_dest_dirs=False, logger=None):
"""
Augmented shutil.copy with extra options and slightly
modified behaviour
Parameters
==========
src: string
path to source file
dst: string
path to destingation
only_update: bool
only copy if source is newer than destination
(returns None if it was newer), default: False
copystat: bool
See shutil.copystat. default: True
cwd: string
Path to working directory (root of relative paths)
dest_is_dir: bool
ensures that dst is treated as a directory. default: False
create_dest_dirs: bool
creates directories if needed.
logger: logging.Looger
debug level info emitted. Passed onto make_dirs.
Returns
=======
Path to the copied file.
"""
# Handle virtual working directory
if cwd:
if not os.path.isabs(src):
src = os.path.join(cwd, src)
if not os.path.isabs(dst):
dst = os.path.join(cwd, dst)
# Make sure source file extists
if not os.path.exists(src):
# Source needs to exist
msg = "Source: `{}` does not exist".format(src)
raise FileNotFoundError(msg)
# We accept both (re)naming destination file _or_
# passing a (possible non-existant) destination directory
if dest_is_dir:
if not dst[-1] == '/':
dst = dst+'/'
else:
if os.path.exists(dst) and os.path.isdir(dst):
dest_is_dir = True
if dest_is_dir:
dest_dir = dst
dest_fname = os.path.basename(src)
dst = os.path.join(dest_dir, dest_fname)
else:
dest_dir = os.path.dirname(dst)
dest_fname = os.path.basename(dst)
if not os.path.exists(dest_dir):
if create_dest_dirs:
make_dirs(dest_dir, logger=logger)
else:
msg = "You must create directory first."
raise FileNotFoundError(msg)
if only_update:
if not missing_or_other_newer(dst, src):
if logger:
logger.debug(
"Did not copy {} to {} (source not newer)".format(
src, dst))
return
if os.path.islink(dst):
if os.path.abspath(os.path.realpath(dst)) == \
os.path.abspath(dst):
pass # destination is a symlic pointing to src
else:
if logger:
logger.debug("Copying {} to {}".format(src, dst))
shutil.copy(src, dst)
if copystat:
shutil.copystat(src, dst)
return dst
def md5_of_file(path, nblocks=128):
"""
Computes the md5 hash of a file.
Parameters
==========
path: string
path to file to compute hash of
Returns
=======
hashlib md5 hash object. Use .digest() or .hexdigest()
on returned object to get binary or hex encoded string.
"""
md = md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(nblocks*md.block_size), b''):
md.update(chunk)
return md
def md5_of_string(string):
md = md5()
md.update(string)
return md
def missing_or_other_newer(path, other_path, cwd=None):
"""
Investigate if path is non-existant or older than provided reference
path.
Parameters
==========
path: string
path to path which might be missing or too old
other_path: string
reference path
cwd: string
working directory (root of relative paths)
Returns
=======
True if path is older or missing.
"""
cwd = cwd or '.'
path = get_abspath(path, cwd=cwd)
other_path = get_abspath(other_path, cwd=cwd)
if not os.path.exists(path):
return True
if os.path.getmtime(other_path) - 1e-6 >= os.path.getmtime(path):
# 1e-6 is needed beacuse http://stackoverflow.com/questions/17086426/
return True
return False
class HasMetaData(object):
"""
Provides convenice classmethods for a class to pickle some metadata.
"""
metadata_filename = '.metadata'
@classmethod
def _get_metadata_key(cls, kw):
""" kw could be e.g. 'compiler' """
return cls.__name__+'_'+kw
@classmethod
def get_from_metadata_file(cls, dirpath, key):
"""
Get value of key in metadata file dict.
"""
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
return d[key]
else:
raise FileNotFoundError(
"No such file: {0}".format(fullpath))
@classmethod
def save_to_metadata_file(cls, dirpath, key, value):
"""
Store `key: value` in metadata file dict.
"""
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
d.update({key: value})
pickle.dump(d, open(fullpath, 'wb'))
else:
pickle.dump({key: value}, open(fullpath, 'wb'))
def MetaReaderWriter(filename):
class ReaderWriter(HasMetaData):
metadata_filename = filename
return ReaderWriter()
def import_module_from_file(filename, only_if_newer_than=None):
"""
Imports (cython generated) shared object file (.so)
Provide a list of paths in `only_if_newer_than` to check
timestamps of dependencies. import_ raises an ImportError
if any is newer.
Word of warning: Python's caching or the OS caching (unclear to author)
is horrible for reimporting same path of an .so file. It will
not detect the new time stamp nor new checksum but will use old
module.
Use unique names for this reason.
Parameters
==========
filename: string
path to shared object
only_if_newer_than: iterable of strings
paths to dependencies of the shared object
Raises
======
ImportError if any of the files specified in only_if_newer_than are newer
than the file given by filename.
"""
import imp
path, name = os.path.split(filename)
name, ext = os.path.splitext(name)
name = name.split('.')[0]
fobj, filename, data = imp.find_module(name, [path])
if only_if_newer_than:
for dep in only_if_newer_than:
if os.path.getmtime(filename) < os.path.getmtime(dep):
raise ImportError("{} is newer than {}".format(dep, filename))
mod = imp.load_module(name, fobj, filename, data)
return mod
def pyx_is_cplus(path):
"""
Inspect a Cython source file (.pyx) and look for comment line like:
# distutils: language = c++
Returns True if such a file is present in the file, else False.
"""
for line in open(path, 'rt'):
if line.startswith('#') and '=' in line:
splitted = line.split('=')
if len(splitted) != 2:
continue
lhs, rhs = splitted
if lhs.strip().split()[-1].lower() == 'language' and \
rhs.strip().split()[0].lower() == 'c++':
return True
return False
def uniquify(l):
"""
Uniquify a list (skip duplicate items).
"""
result = []
for x in l:
if x not in result:
result.append(x)
return result
|
bjodah/pycompilation | pycompilation/util.py | pyx_is_cplus | python | def pyx_is_cplus(path):
for line in open(path, 'rt'):
if line.startswith('#') and '=' in line:
splitted = line.split('=')
if len(splitted) != 2:
continue
lhs, rhs = splitted
if lhs.strip().split()[-1].lower() == 'language' and \
rhs.strip().split()[0].lower() == 'c++':
return True
return False | Inspect a Cython source file (.pyx) and look for comment line like:
# distutils: language = c++
Returns True if such a file is present in the file, else False. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L336-L353 | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import fnmatch
import os
import pickle
import shutil
from collections import namedtuple
from hashlib import md5
class CompilationError(Exception):
pass
class FileNotFoundError(Exception):
pass
def expand_collection_in_dict(d, key, new_items, no_duplicates=True):
"""
Parameters
d: dict
dict in which a key will be inserted/expanded
key: hashable
key in d
new_items: iterable
d[key] will be extended with items in new_items
no_duplicates: bool
avoid inserting duplicates in d[key] (default: True)
"""
if key in d:
if no_duplicates:
new_items = filter(lambda x: x not in d[key], new_items)
if isinstance(d[key], set):
map(d[key].add, new_items)
elif isinstance(d[key], list):
map(d[key].append, new_items)
else:
d[key] = d[key] + new_items
else:
d[key] = new_items
Glob = namedtuple('Glob', 'pathname')
ArbitraryDepthGlob = namedtuple('ArbitraryDepthGlob', 'filename')
def glob_at_depth(filename_glob, cwd=None):
if cwd is not None:
cwd = '.'
globbed = []
for root, dirs, filenames in os.walk(cwd):
for fn in filenames:
if fnmatch.fnmatch(fn, filename_glob):
globbed.append(os.path.join(root, fn))
return globbed
def get_abspath(path, cwd=None):
if os.path.isabs(path):
return path
else:
cwd = cwd or '.'
if not os.path.isabs(cwd):
cwd = os.path.abspath(cwd)
return os.path.abspath(
os.path.join(cwd, path)
)
def make_dirs(path, logger=None):
if path[-1] == '/':
parent = os.path.dirname(path[:-1])
else:
parent = os.path.dirname(path)
if len(parent) > 0:
if not os.path.exists(parent):
make_dirs(parent, logger=logger)
if not os.path.exists(path):
if logger:
logger.info("Making dir: "+path)
os.mkdir(path, 0o777)
else:
assert os.path.isdir(path)
def copy(src, dst, only_update=False, copystat=True, cwd=None,
dest_is_dir=False, create_dest_dirs=False, logger=None):
"""
Augmented shutil.copy with extra options and slightly
modified behaviour
Parameters
==========
src: string
path to source file
dst: string
path to destingation
only_update: bool
only copy if source is newer than destination
(returns None if it was newer), default: False
copystat: bool
See shutil.copystat. default: True
cwd: string
Path to working directory (root of relative paths)
dest_is_dir: bool
ensures that dst is treated as a directory. default: False
create_dest_dirs: bool
creates directories if needed.
logger: logging.Looger
debug level info emitted. Passed onto make_dirs.
Returns
=======
Path to the copied file.
"""
# Handle virtual working directory
if cwd:
if not os.path.isabs(src):
src = os.path.join(cwd, src)
if not os.path.isabs(dst):
dst = os.path.join(cwd, dst)
# Make sure source file extists
if not os.path.exists(src):
# Source needs to exist
msg = "Source: `{}` does not exist".format(src)
raise FileNotFoundError(msg)
# We accept both (re)naming destination file _or_
# passing a (possible non-existant) destination directory
if dest_is_dir:
if not dst[-1] == '/':
dst = dst+'/'
else:
if os.path.exists(dst) and os.path.isdir(dst):
dest_is_dir = True
if dest_is_dir:
dest_dir = dst
dest_fname = os.path.basename(src)
dst = os.path.join(dest_dir, dest_fname)
else:
dest_dir = os.path.dirname(dst)
dest_fname = os.path.basename(dst)
if not os.path.exists(dest_dir):
if create_dest_dirs:
make_dirs(dest_dir, logger=logger)
else:
msg = "You must create directory first."
raise FileNotFoundError(msg)
if only_update:
if not missing_or_other_newer(dst, src):
if logger:
logger.debug(
"Did not copy {} to {} (source not newer)".format(
src, dst))
return
if os.path.islink(dst):
if os.path.abspath(os.path.realpath(dst)) == \
os.path.abspath(dst):
pass # destination is a symlic pointing to src
else:
if logger:
logger.debug("Copying {} to {}".format(src, dst))
shutil.copy(src, dst)
if copystat:
shutil.copystat(src, dst)
return dst
def md5_of_file(path, nblocks=128):
    """
    Compute the md5 hash of a file.

    Parameters
    ==========
    path: string
        path to the file to hash
    nblocks: int
        number of md5 blocks read per chunk (default: 128)

    Returns
    =======
    hashlib md5 hash object. Use .digest() or .hexdigest()
    on the returned object to get a binary or hex encoded string.
    """
    hasher = md5()
    chunk_size = nblocks * hasher.block_size
    # Read in fixed-size chunks so arbitrarily large files fit in memory.
    with open(path, 'rb') as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher
def md5_of_string(string):
    """
    Compute the md5 hash of a string.

    Parameters
    ==========
    string: bytes or str
        data to hash. A str is encoded as UTF-8 first, since
        hashlib's update() rejects text under Python 3.

    Returns
    =======
    hashlib md5 hash object. Use .digest() or .hexdigest()
    on the returned object to get a binary or hex encoded string.
    """
    md = md5()
    if isinstance(string, str):
        # Generalization: previously a str argument raised TypeError on
        # Python 3; bytes input behaves exactly as before.
        string = string.encode('utf-8')
    md.update(string)
    return md
def missing_or_other_newer(path, other_path, cwd=None):
    """
    Check whether `path` is non-existent or older than a reference path.

    Parameters
    ==========
    path: string
        path which might be missing or too old
    other_path: string
        reference path
    cwd: string
        working directory (root of relative paths)

    Returns
    =======
    True if `path` is older or missing.
    """
    base = cwd or '.'
    target = get_abspath(path, cwd=base)
    reference = get_abspath(other_path, cwd=base)
    if not os.path.exists(target):
        return True
    # Subtract 1e-6 to guard against mtime round-off, see
    # http://stackoverflow.com/questions/17086426/
    return os.path.getmtime(reference) - 1e-6 >= os.path.getmtime(target)
class HasMetaData(object):
    """
    Provides convenience classmethods for a class to pickle some metadata.

    The metadata is stored as a pickled dict in a hidden file
    (``metadata_filename``) inside a given directory.
    """
    metadata_filename = '.metadata'

    @classmethod
    def _get_metadata_key(cls, kw):
        """Namespace the key per subclass; kw could be e.g. 'compiler'."""
        return cls.__name__+'_'+kw

    @classmethod
    def get_from_metadata_file(cls, dirpath, key):
        """
        Get value of key in metadata file dict.

        Raises FileNotFoundError if dirpath contains no metadata file.
        """
        fullpath = os.path.join(dirpath, cls.metadata_filename)
        if os.path.exists(fullpath):
            # Fix: use a context manager so the file handle is closed
            # (previously pickle.load(open(...)) leaked the handle).
            with open(fullpath, 'rb') as fh:
                d = pickle.load(fh)
            return d[key]
        else:
            raise FileNotFoundError(
                "No such file: {0}".format(fullpath))

    @classmethod
    def save_to_metadata_file(cls, dirpath, key, value):
        """
        Store `key: value` in metadata file dict.

        Existing entries under other keys are preserved.
        """
        fullpath = os.path.join(dirpath, cls.metadata_filename)
        if os.path.exists(fullpath):
            with open(fullpath, 'rb') as fh:
                d = pickle.load(fh)
            d.update({key: value})
        else:
            d = {key: value}
        with open(fullpath, 'wb') as fh:
            pickle.dump(d, fh)
def MetaReaderWriter(filename):
    """Return a ``HasMetaData`` instance using *filename* as its metadata file."""
    reader_writer_cls = type('ReaderWriter', (HasMetaData,),
                             {'metadata_filename': filename})
    return reader_writer_cls()
def import_module_from_file(filename, only_if_newer_than=None):
    """
    Imports (cython generated) shared object file (.so)

    Provide a list of paths in `only_if_newer_than` to check
    timestamps of dependencies. import_ raises an ImportError
    if any is newer.

    Word of warning: Python's caching or the OS caching (unclear to author)
    is horrible for reimporting the same path of an .so file. It will
    not detect the new time stamp nor new checksum but will use the old
    module. Use unique names for this reason.

    Parameters
    ==========
    filename: string
        path to shared object
    only_if_newer_than: iterable of strings
        paths to dependencies of the shared object

    Raises
    ======
    ImportError if any of the files specified in only_if_newer_than are newer
    than the file given by filename.
    """
    import imp  # NOTE: deprecated module, removed in Python 3.12
    path, name = os.path.split(filename)
    name, ext = os.path.splitext(name)
    name = name.split('.')[0]
    fobj, filename, data = imp.find_module(name, [path])
    try:
        if only_if_newer_than:
            for dep in only_if_newer_than:
                if os.path.getmtime(filename) < os.path.getmtime(dep):
                    raise ImportError("{} is newer than {}".format(dep, filename))
        mod = imp.load_module(name, fobj, filename, data)
    finally:
        # Fix: imp.find_module returns an open file object (or None for
        # e.g. packages) which the caller is responsible for closing;
        # previously it leaked, notably on the ImportError path above.
        if fobj is not None:
            fobj.close()
    return mod
def find_binary_of_command(candidates):
    """
    Return the first candidate command found on PATH.

    Parameters
    ==========
    candidates: iterable of strings
        command names (or paths) to look for, in order of preference.

    Returns
    =======
    (candidate, binary_path) tuple for the first match.

    Raises
    ======
    RuntimeError if no candidate matches.
    """
    # shutil.which is the supported stdlib replacement for
    # distutils.spawn.find_executable (distutils was removed in 3.12).
    from shutil import which
    for c in candidates:
        binary_path = which(c)
        if c and binary_path:
            return c, binary_path
    raise RuntimeError('No binary located for candidates: {}'.format(
        candidates))
def uniquify(l):
    """
    Uniquify a list (skip duplicate items), preserving first-seen order.

    Performance fix: membership was previously tested against the result
    list, i.e. O(n**2) overall. Hashable items now use an O(1) set lookup;
    unhashable items (e.g. lists) fall back to the old linear scan so the
    accepted inputs are unchanged.
    """
    seen_hashable = set()
    seen_unhashable = []
    result = []
    for x in l:
        try:
            if x in seen_hashable:
                continue
            seen_hashable.add(x)
        except TypeError:
            # x is unhashable -- linear fallback, as before.
            if x in seen_unhashable:
                continue
            seen_unhashable.append(x)
        result.append(x)
    return result
|
bjodah/pycompilation | pycompilation/util.py | uniquify | python | def uniquify(l):
result = []
for x in l:
if x not in result:
result.append(x)
return result | Uniquify a list (skip duplicate items). | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L356-L364 | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import fnmatch
import os
import pickle
import shutil
from collections import namedtuple
from hashlib import md5
class CompilationError(Exception):
    # Project-specific error type; presumably raised when an external
    # compilation step fails -- TODO confirm against call sites.
    pass
class FileNotFoundError(Exception):
    # NOTE(review): shadows the Python 3 builtin FileNotFoundError
    # (this code predates it); raised by copy()/get_from_metadata_file().
    pass
def expand_collection_in_dict(d, key, new_items, no_duplicates=True):
    """
    Extend the collection stored under ``d[key]`` with ``new_items``.

    Parameters
    ==========
    d: dict
        dict in which a key will be inserted/expanded
    key: hashable
        key in d
    new_items: iterable
        d[key] will be extended with items in new_items
    no_duplicates: bool
        avoid inserting duplicates in d[key] (default: True)
    """
    if key in d:
        if no_duplicates:
            # Fix: materialize the filtered items -- a lazy `filter`
            # object would be consumed once (or, with the old discarded
            # `map(...)`, never applied at all) under Python 3.
            new_items = [x for x in new_items if x not in d[key]]
        if isinstance(d[key], set):
            # Fix: the previous `map(d[key].add, new_items)` built a lazy
            # iterator and threw it away, so nothing was added on Python 3.
            d[key].update(new_items)
        elif isinstance(d[key], list):
            # Same fix for `map(d[key].append, new_items)`.
            d[key].extend(new_items)
        else:
            d[key] = d[key] + new_items
    else:
        d[key] = new_items
Glob = namedtuple('Glob', 'pathname')
ArbitraryDepthGlob = namedtuple('ArbitraryDepthGlob', 'filename')
def glob_at_depth(filename_glob, cwd=None):
    """
    Recursively glob for files matching `filename_glob` below `cwd`.

    Parameters
    ==========
    filename_glob: string
        fnmatch-style pattern matched against file basenames
    cwd: string
        root directory of the walk (default: current directory)

    Returns
    =======
    list of matching paths (prefixed with `cwd`), in os.walk order.
    """
    if cwd is None:
        # Bugfix: the condition was inverted (`is not None`), which
        # discarded a user-supplied cwd and crashed (os.walk(None))
        # when cwd was omitted.
        cwd = '.'
    globbed = []
    for root, dirs, filenames in os.walk(cwd):
        for fn in filenames:
            if fnmatch.fnmatch(fn, filename_glob):
                globbed.append(os.path.join(root, fn))
    return globbed
def get_abspath(path, cwd=None):
    """Return `path` as an absolute path, resolving relative paths against `cwd`.

    An already-absolute `path` is returned unchanged. A relative `cwd`
    (default '.') is itself first made absolute.
    """
    if os.path.isabs(path):
        return path
    base = cwd or '.'
    if not os.path.isabs(base):
        base = os.path.abspath(base)
    return os.path.abspath(os.path.join(base, path))
def make_dirs(path, logger=None):
    """Recursively create `path` (like ``mkdir -p``), logging each mkdir.

    Raises AssertionError if `path` already exists but is not a directory.
    """
    # Drop a single trailing slash before taking the dirname.
    trimmed = path[:-1] if path[-1] == '/' else path
    parent = os.path.dirname(trimmed)
    if parent and not os.path.exists(parent):
        make_dirs(parent, logger=logger)
    if os.path.exists(path):
        assert os.path.isdir(path)
    else:
        if logger:
            logger.info("Making dir: "+path)
        os.mkdir(path, 0o777)
def copy(src, dst, only_update=False, copystat=True, cwd=None,
         dest_is_dir=False, create_dest_dirs=False, logger=None):
    """
    Augmented shutil.copy with extra options and slightly
    modified behaviour

    Parameters
    ==========
    src: string
        path to source file
    dst: string
        path to destination
    only_update: bool
        only copy if source is newer than destination
        (returns None if it was not newer), default: False
    copystat: bool
        See shutil.copystat. default: True
    cwd: string
        Path to working directory (root of relative paths)
    dest_is_dir: bool
        ensures that dst is treated as a directory. default: False
    create_dest_dirs: bool
        creates directories if needed.
    logger: logging.Logger
        debug level info emitted. Passed onto make_dirs.

    Returns
    =======
    Path to the copied file.
    """
    # Handle virtual working directory: resolve relative src/dst against cwd
    if cwd:
        if not os.path.isabs(src):
            src = os.path.join(cwd, src)
        if not os.path.isabs(dst):
            dst = os.path.join(cwd, dst)

    # Make sure source file exists
    if not os.path.exists(src):
        # Source needs to exist
        msg = "Source: `{}` does not exist".format(src)
        raise FileNotFoundError(msg)

    # We accept both (re)naming the destination file _or_
    # passing a (possibly non-existent) destination directory
    if dest_is_dir:
        if not dst[-1] == '/':
            dst = dst+'/'
    else:
        # An existing directory given as dst is treated as a directory
        if os.path.exists(dst) and os.path.isdir(dst):
            dest_is_dir = True

    if dest_is_dir:
        dest_dir = dst
        dest_fname = os.path.basename(src)
        dst = os.path.join(dest_dir, dest_fname)
    else:
        dest_dir = os.path.dirname(dst)
        dest_fname = os.path.basename(dst)

    if not os.path.exists(dest_dir):
        if create_dest_dirs:
            make_dirs(dest_dir, logger=logger)
        else:
            msg = "You must create directory first."
            raise FileNotFoundError(msg)

    if only_update:
        # Skip the copy (returning None) when the destination is up to date
        if not missing_or_other_newer(dst, src):
            if logger:
                logger.debug(
                    "Did not copy {} to {} (source not newer)".format(
                        src, dst))
            return

    if os.path.islink(dst):
        # NOTE(review): this compares dst's realpath with dst's own
        # abspath (not with src, despite the original comment claiming
        # "symlink pointing to src") -- confirm the intended condition.
        if os.path.abspath(os.path.realpath(dst)) == \
                os.path.abspath(dst):
            pass  # destination is a symlink pointing to src
    else:
        if logger:
            logger.debug("Copying {} to {}".format(src, dst))
        shutil.copy(src, dst)
        if copystat:
            shutil.copystat(src, dst)

    return dst
def md5_of_file(path, nblocks=128):
"""
Computes the md5 hash of a file.
Parameters
==========
path: string
path to file to compute hash of
Returns
=======
hashlib md5 hash object. Use .digest() or .hexdigest()
on returned object to get binary or hex encoded string.
"""
md = md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(nblocks*md.block_size), b''):
md.update(chunk)
return md
def md5_of_string(string):
md = md5()
md.update(string)
return md
def missing_or_other_newer(path, other_path, cwd=None):
"""
Investigate if path is non-existant or older than provided reference
path.
Parameters
==========
path: string
path to path which might be missing or too old
other_path: string
reference path
cwd: string
working directory (root of relative paths)
Returns
=======
True if path is older or missing.
"""
cwd = cwd or '.'
path = get_abspath(path, cwd=cwd)
other_path = get_abspath(other_path, cwd=cwd)
if not os.path.exists(path):
return True
if os.path.getmtime(other_path) - 1e-6 >= os.path.getmtime(path):
# 1e-6 is needed beacuse http://stackoverflow.com/questions/17086426/
return True
return False
class HasMetaData(object):
"""
Provides convenice classmethods for a class to pickle some metadata.
"""
metadata_filename = '.metadata'
@classmethod
def _get_metadata_key(cls, kw):
""" kw could be e.g. 'compiler' """
return cls.__name__+'_'+kw
@classmethod
def get_from_metadata_file(cls, dirpath, key):
"""
Get value of key in metadata file dict.
"""
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
return d[key]
else:
raise FileNotFoundError(
"No such file: {0}".format(fullpath))
@classmethod
def save_to_metadata_file(cls, dirpath, key, value):
"""
Store `key: value` in metadata file dict.
"""
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
d.update({key: value})
pickle.dump(d, open(fullpath, 'wb'))
else:
pickle.dump({key: value}, open(fullpath, 'wb'))
def MetaReaderWriter(filename):
class ReaderWriter(HasMetaData):
metadata_filename = filename
return ReaderWriter()
def import_module_from_file(filename, only_if_newer_than=None):
"""
Imports (cython generated) shared object file (.so)
Provide a list of paths in `only_if_newer_than` to check
timestamps of dependencies. import_ raises an ImportError
if any is newer.
Word of warning: Python's caching or the OS caching (unclear to author)
is horrible for reimporting same path of an .so file. It will
not detect the new time stamp nor new checksum but will use old
module.
Use unique names for this reason.
Parameters
==========
filename: string
path to shared object
only_if_newer_than: iterable of strings
paths to dependencies of the shared object
Raises
======
ImportError if any of the files specified in only_if_newer_than are newer
than the file given by filename.
"""
import imp
path, name = os.path.split(filename)
name, ext = os.path.splitext(name)
name = name.split('.')[0]
fobj, filename, data = imp.find_module(name, [path])
if only_if_newer_than:
for dep in only_if_newer_than:
if os.path.getmtime(filename) < os.path.getmtime(dep):
raise ImportError("{} is newer than {}".format(dep, filename))
mod = imp.load_module(name, fobj, filename, data)
return mod
def find_binary_of_command(candidates):
"""
Calls `find_executable` from distuils for
provided candidates and returns first hit.
If no candidate mathces, a RuntimeError is raised
"""
from distutils.spawn import find_executable
for c in candidates:
binary_path = find_executable(c)
if c and binary_path:
return c, binary_path
raise RuntimeError('No binary located for candidates: {}'.format(
candidates))
def pyx_is_cplus(path):
    """
    Inspect a Cython source file (.pyx) and look for a comment line like:

    # distutils: language = c++

    Returns True if such a line is present in the file, else False.
    """
    # Fix: use a context manager -- the file handle was previously leaked.
    with open(path, 'rt') as fh:
        for line in fh:
            if line.startswith('#') and '=' in line:
                splitted = line.split('=')
                if len(splitted) != 2:
                    continue
                lhs, rhs = splitted
                lhs_words = lhs.strip().split()
                rhs_words = rhs.strip().split()
                # Fix: guard against empty rhs (e.g. a line "#foo="),
                # which previously raised IndexError on rhs_words[0].
                if not lhs_words or not rhs_words:
                    continue
                if lhs_words[-1].lower() == 'language' and \
                        rhs_words[0].lower() == 'c++':
                    return True
    return False
|
bjodah/pycompilation | pycompilation/util.py | HasMetaData.get_from_metadata_file | python | def get_from_metadata_file(cls, dirpath, key):
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
return d[key]
else:
raise FileNotFoundError(
"No such file: {0}".format(fullpath)) | Get value of key in metadata file dict. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L249-L259 | null | class HasMetaData(object):
"""
Provides convenice classmethods for a class to pickle some metadata.
"""
metadata_filename = '.metadata'
@classmethod
def _get_metadata_key(cls, kw):
""" kw could be e.g. 'compiler' """
return cls.__name__+'_'+kw
@classmethod
@classmethod
def save_to_metadata_file(cls, dirpath, key, value):
"""
Store `key: value` in metadata file dict.
"""
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
d.update({key: value})
pickle.dump(d, open(fullpath, 'wb'))
else:
pickle.dump({key: value}, open(fullpath, 'wb'))
|
bjodah/pycompilation | pycompilation/util.py | HasMetaData.save_to_metadata_file | python | def save_to_metadata_file(cls, dirpath, key, value):
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
d.update({key: value})
pickle.dump(d, open(fullpath, 'wb'))
else:
pickle.dump({key: value}, open(fullpath, 'wb')) | Store `key: value` in metadata file dict. | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L262-L272 | null | class HasMetaData(object):
"""
Provides convenice classmethods for a class to pickle some metadata.
"""
metadata_filename = '.metadata'
@classmethod
def _get_metadata_key(cls, kw):
""" kw could be e.g. 'compiler' """
return cls.__name__+'_'+kw
@classmethod
def get_from_metadata_file(cls, dirpath, key):
"""
Get value of key in metadata file dict.
"""
fullpath = os.path.join(dirpath, cls.metadata_filename)
if os.path.exists(fullpath):
d = pickle.load(open(fullpath, 'rb'))
return d[key]
else:
raise FileNotFoundError(
"No such file: {0}".format(fullpath))
@classmethod
|
davidhuser/dhis2.py | dhis2/logger.py | _set_log_format | python | def _set_log_format(color, include_caller):
level_name = '* %(levelname)1s'
time = '%(asctime)s,%(msecs)03d'
message = '%(message)s'
color_start = '%(color)s'
color_end = '%(end_color)s'
caller = '[%(module)s:%(lineno)d]'
if color:
if include_caller:
return '{}{}{} {} {} {}'.format(color_start, level_name, color_end, time, message, caller)
else:
return '{}{}{} {} {}'.format(color_start, level_name, color_end, time, message)
else:
if include_caller:
return '{} {} {} {}'.format(level_name, time, message, caller)
else:
return '{} {} {}'.format(level_name, time, message) | Set log format
:param color: Log message is colored
:param include_caller: At the end, put a [caller:line-of-code], e.g. [script:123]
:return: string of log format | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/logger.py#L14-L37 | null | # -*- coding: utf-8 -*-
"""
dhis2.logger
~~~~~~~~~~~~~~~~~
This module sets up logzero loggers.
"""
import logging
import logzero
def setup_logger(logfile=None, backup_count=20, log_level=logging.INFO, include_caller=True):
    """
    Setup logzero logger. if logfile is specified, create additional file logger
    :param logfile: path to log file destination
    :param backup_count: number of rotating files
    :param log_level: min. log level FOR FILE LOGGING (the STDOUT logger is configured separately)
    :param include_caller: whether to include the caller in the log output to STDOUT, e.g. [script:123]
    """
    # Colored format for the default (STDOUT) logger.
    formatter = logzero.LogFormatter(
        fmt=_set_log_format(color=True, include_caller=include_caller),
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    logzero.setup_default_logger(formatter=formatter)
    if logfile:
        # Plain (uncolored) format for the rotating file handler;
        # caller info is always included in the file output.
        formatter = logzero.LogFormatter(
            fmt=_set_log_format(color=False, include_caller=True),
            datefmt='%Y-%m-%d %H:%M:%S')
        # Rotate at ~10 MB (1e7 bytes), keeping `backup_count` old files.
        logzero.logfile(logfile, formatter=formatter, loglevel=log_level, maxBytes=int(1e7), backupCount=backup_count)
|
davidhuser/dhis2.py | dhis2/logger.py | setup_logger | python | def setup_logger(logfile=None, backup_count=20, log_level=logging.INFO, include_caller=True):
formatter = logzero.LogFormatter(
fmt=_set_log_format(color=True, include_caller=include_caller),
datefmt='%Y-%m-%d %H:%M:%S'
)
logzero.setup_default_logger(formatter=formatter)
if logfile:
formatter = logzero.LogFormatter(
fmt=_set_log_format(color=False, include_caller=True),
datefmt='%Y-%m-%d %H:%M:%S')
logzero.logfile(logfile, formatter=formatter, loglevel=log_level, maxBytes=int(1e7), backupCount=backup_count) | Setup logzero logger. if logfile is specified, create additional file logger
:param logfile: path to log file destination
:param backup_count: number of rotating files
:param log_level: min. log level FOR FILE LOGGING
:param include_caller: whether to include the caller in the log output to STDOUT, e.g. [script:123] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/logger.py#L40-L58 | [
"def _set_log_format(color, include_caller):\n \"\"\"\n Set log format\n :param color: Log message is colored\n :param include_caller: At the end, put a [caller:line-of-code], e.g. [script:123]\n :return: string of log format\n \"\"\"\n level_name = '* %(levelname)1s'\n time = '%(asctime)s,%... | # -*- coding: utf-8 -*-
"""
dhis2.logger
~~~~~~~~~~~~~~~~~
This module sets up logzero loggers.
"""
import logging
import logzero
def _set_log_format(color, include_caller):
"""
Set log format
:param color: Log message is colored
:param include_caller: At the end, put a [caller:line-of-code], e.g. [script:123]
:return: string of log format
"""
level_name = '* %(levelname)1s'
time = '%(asctime)s,%(msecs)03d'
message = '%(message)s'
color_start = '%(color)s'
color_end = '%(end_color)s'
caller = '[%(module)s:%(lineno)d]'
if color:
if include_caller:
return '{}{}{} {} {} {}'.format(color_start, level_name, color_end, time, message, caller)
else:
return '{}{}{} {} {}'.format(color_start, level_name, color_end, time, message)
else:
if include_caller:
return '{} {} {} {}'.format(level_name, time, message, caller)
else:
return '{} {} {}'.format(level_name, time, message)
|
davidhuser/dhis2.py | dhis2/api.py | Api.from_auth_file | python | def from_auth_file(cls, location=None, api_version=None, user_agent=None):
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent) | Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L139-L162 | [
"def load_json(path):\n \"\"\"\n Load JSON file from path\n :param path: file path\n :return: A Python object (e.g. a dict)\n \"\"\"\n try:\n with open(path, 'r') as json_file:\n return json.load(json_file)\n except (OSError, IOError):\n raise ClientException(\"File not... | class Api(object):
"""A Python interface to the DHIS2 API
Example usage:
from dhis2 import Api
api = Api('play.dhis2.org/demo', 'admin', 'district')
"""
def __init__(self, server, username, password, api_version=None, user_agent=None):
"""
:param server: baseurl, e.g. 'play.dhis2.org/demo'
:param username: DHIS2 username
:param password: DHIS2 password
:param api_version: optional, creates a url like /api/29/schemas
:param user_agent: optional, add user-agent to header. otherwise it uses requests' user-agent.
"""
self._base_url, self._api_version, self._info, self._version, self._version_int, self._revision = (None,)*6
self.base_url = server
self.api_version = api_version
self.session = requests.Session()
self.username = username
self.session.auth = (self.username, password)
if user_agent:
self.session.headers['user-agent'] = user_agent
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, server):
if '/api' in server:
raise ClientException("Do not include /api/ in the DHIS2 `server` argument")
server = server.strip()
is_local = 'localhost' in server or '127.0.0.1' in server
has_scheme = '://' in server
# add http / https schemes when missing
if is_local and not has_scheme:
url = 'http://{}'.format(server)
elif not is_local and not has_scheme:
url = 'https://{}'.format(server)
else:
url = server
o = urlparse(url)
self._base_url = urlunparse((o.scheme, o.netloc, o.path, '', '', ''))
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, number):
if number:
try:
i = int(number)
if i < 25:
raise ValueError
except ValueError:
raise ClientException("`api_version` must be 25 or greater: {}".format(number))
else:
self._api_version = i
else:
self._api_version = None
@property
def api_url(self):
if self._api_version:
return '{}/api/{}'.format(self._base_url, self._api_version)
else:
return '{}/api'.format(self._base_url)
@property
def info(self):
if not self._info:
self._info = self.get('system/info').json()
return self._info
@property
def version(self):
return self._version if self._version else self.info['version']
@property
def revision(self):
return self._revision if self._revision else self.info['revision']
@property
def version_int(self):
if not self._version_int:
self._version_int = version_to_int(self.version)
return self._version_int
def __str__(self):
s = "DHIS2 Base URL: '{}'\n" \
"API URL: '{}'\n" \
"Username: '{}'".format(self.base_url, self.api_url, self.username)
return s
@classmethod
@staticmethod
def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response
@staticmethod
def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__))
def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r)
def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream)
def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params)
def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params)
def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params)
def delete(self, endpoint, json=None, params=None, **kwargs):
"""
DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params)
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))}
def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
"""
GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View
"""
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator())
def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum amount to partition into
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params)
|
davidhuser/dhis2.py | dhis2/api.py | Api._validate_response | python | def _validate_response(response):
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response | Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L165-L179 | null | class Api(object):
"""A Python interface to the DHIS2 API
Example usage:
from dhis2 import Api
api = Api('play.dhis2.org/demo', 'admin', 'district')
"""
def __init__(self, server, username, password, api_version=None, user_agent=None):
"""
:param server: baseurl, e.g. 'play.dhis2.org/demo'
:param username: DHIS2 username
:param password: DHIS2 password
:param api_version: optional, creates a url like /api/29/schemas
:param user_agent: optional, add user-agent to header. otherwise it uses requests' user-agent.
"""
self._base_url, self._api_version, self._info, self._version, self._version_int, self._revision = (None,)*6
self.base_url = server
self.api_version = api_version
self.session = requests.Session()
self.username = username
self.session.auth = (self.username, password)
if user_agent:
self.session.headers['user-agent'] = user_agent
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, server):
if '/api' in server:
raise ClientException("Do not include /api/ in the DHIS2 `server` argument")
server = server.strip()
is_local = 'localhost' in server or '127.0.0.1' in server
has_scheme = '://' in server
# add http / https schemes when missing
if is_local and not has_scheme:
url = 'http://{}'.format(server)
elif not is_local and not has_scheme:
url = 'https://{}'.format(server)
else:
url = server
o = urlparse(url)
self._base_url = urlunparse((o.scheme, o.netloc, o.path, '', '', ''))
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, number):
if number:
try:
i = int(number)
if i < 25:
raise ValueError
except ValueError:
raise ClientException("`api_version` must be 25 or greater: {}".format(number))
else:
self._api_version = i
else:
self._api_version = None
@property
def api_url(self):
if self._api_version:
return '{}/api/{}'.format(self._base_url, self._api_version)
else:
return '{}/api'.format(self._base_url)
@property
def info(self):
if not self._info:
self._info = self.get('system/info').json()
return self._info
@property
def version(self):
return self._version if self._version else self.info['version']
@property
def revision(self):
return self._revision if self._revision else self.info['revision']
@property
def version_int(self):
if not self._version_int:
self._version_int = version_to_int(self.version)
return self._version_int
def __str__(self):
s = "DHIS2 Base URL: '{}'\n" \
"API URL: '{}'\n" \
"Username: '{}'".format(self.base_url, self.api_url, self.username)
return s
@classmethod
def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent)
@staticmethod
@staticmethod
def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__))
def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r)
def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream)
def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params)
def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params)
def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params)
def delete(self, endpoint, json=None, params=None, **kwargs):
"""
DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params)
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))}
def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
"""
GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View
"""
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator())
def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum amount to partition into
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params)
|
davidhuser/dhis2.py | dhis2/api.py | Api._validate_request | python | def _validate_request(endpoint, file_type='json', data=None, params=None):
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__)) | Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L182-L200 | null | class Api(object):
"""A Python interface to the DHIS2 API
Example usage:
from dhis2 import Api
api = Api('play.dhis2.org/demo', 'admin', 'district')
"""
def __init__(self, server, username, password, api_version=None, user_agent=None):
"""
:param server: baseurl, e.g. 'play.dhis2.org/demo'
:param username: DHIS2 username
:param password: DHIS2 password
:param api_version: optional, creates a url like /api/29/schemas
:param user_agent: optional, add user-agent to header. otherwise it uses requests' user-agent.
"""
self._base_url, self._api_version, self._info, self._version, self._version_int, self._revision = (None,)*6
self.base_url = server
self.api_version = api_version
self.session = requests.Session()
self.username = username
self.session.auth = (self.username, password)
if user_agent:
self.session.headers['user-agent'] = user_agent
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, server):
if '/api' in server:
raise ClientException("Do not include /api/ in the DHIS2 `server` argument")
server = server.strip()
is_local = 'localhost' in server or '127.0.0.1' in server
has_scheme = '://' in server
# add http / https schemes when missing
if is_local and not has_scheme:
url = 'http://{}'.format(server)
elif not is_local and not has_scheme:
url = 'https://{}'.format(server)
else:
url = server
o = urlparse(url)
self._base_url = urlunparse((o.scheme, o.netloc, o.path, '', '', ''))
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, number):
if number:
try:
i = int(number)
if i < 25:
raise ValueError
except ValueError:
raise ClientException("`api_version` must be 25 or greater: {}".format(number))
else:
self._api_version = i
else:
self._api_version = None
@property
def api_url(self):
if self._api_version:
return '{}/api/{}'.format(self._base_url, self._api_version)
else:
return '{}/api'.format(self._base_url)
@property
def info(self):
if not self._info:
self._info = self.get('system/info').json()
return self._info
@property
def version(self):
return self._version if self._version else self.info['version']
@property
def revision(self):
return self._revision if self._revision else self.info['revision']
@property
def version_int(self):
if not self._version_int:
self._version_int = version_to_int(self.version)
return self._version_int
def __str__(self):
s = "DHIS2 Base URL: '{}'\n" \
"API URL: '{}'\n" \
"Username: '{}'".format(self.base_url, self.api_url, self.username)
return s
@classmethod
def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent)
@staticmethod
def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response
@staticmethod
def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r)
def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream)
def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params)
def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params)
def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params)
def delete(self, endpoint, json=None, params=None, **kwargs):
"""
DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params)
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))}
def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
"""
GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View
"""
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator())
def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum amount to partition into
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params)
|
davidhuser/dhis2.py | dhis2/api.py | Api._make_request | python | def _make_request(self, method, endpoint, **kwargs):
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r) | Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L202-L240 | [
"def _validate_response(response):\n \"\"\"\n Return response if ok, raise RequestException if not ok\n :param response: requests.response object\n :return: requests.response object\n \"\"\"\n try:\n response.raise_for_status()\n except requests.RequestException:\n raise RequestEx... | class Api(object):
"""A Python interface to the DHIS2 API
Example usage:
from dhis2 import Api
api = Api('play.dhis2.org/demo', 'admin', 'district')
"""
def __init__(self, server, username, password, api_version=None, user_agent=None):
"""
:param server: baseurl, e.g. 'play.dhis2.org/demo'
:param username: DHIS2 username
:param password: DHIS2 password
:param api_version: optional, creates a url like /api/29/schemas
:param user_agent: optional, add user-agent to header. otherwise it uses requests' user-agent.
"""
self._base_url, self._api_version, self._info, self._version, self._version_int, self._revision = (None,)*6
self.base_url = server
self.api_version = api_version
self.session = requests.Session()
self.username = username
self.session.auth = (self.username, password)
if user_agent:
self.session.headers['user-agent'] = user_agent
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, server):
if '/api' in server:
raise ClientException("Do not include /api/ in the DHIS2 `server` argument")
server = server.strip()
is_local = 'localhost' in server or '127.0.0.1' in server
has_scheme = '://' in server
# add http / https schemes when missing
if is_local and not has_scheme:
url = 'http://{}'.format(server)
elif not is_local and not has_scheme:
url = 'https://{}'.format(server)
else:
url = server
o = urlparse(url)
self._base_url = urlunparse((o.scheme, o.netloc, o.path, '', '', ''))
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, number):
if number:
try:
i = int(number)
if i < 25:
raise ValueError
except ValueError:
raise ClientException("`api_version` must be 25 or greater: {}".format(number))
else:
self._api_version = i
else:
self._api_version = None
@property
def api_url(self):
if self._api_version:
return '{}/api/{}'.format(self._base_url, self._api_version)
else:
return '{}/api'.format(self._base_url)
@property
def info(self):
if not self._info:
self._info = self.get('system/info').json()
return self._info
@property
def version(self):
return self._version if self._version else self.info['version']
@property
def revision(self):
return self._revision if self._revision else self.info['revision']
@property
def version_int(self):
if not self._version_int:
self._version_int = version_to_int(self.version)
return self._version_int
def __str__(self):
s = "DHIS2 Base URL: '{}'\n" \
"API URL: '{}'\n" \
"Username: '{}'".format(self.base_url, self.api_url, self.username)
return s
@classmethod
def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent)
@staticmethod
def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response
@staticmethod
def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__))
def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream)
def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params)
def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params)
def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params)
def delete(self, endpoint, json=None, params=None, **kwargs):
"""
DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params)
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))}
def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
"""
GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View
"""
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator())
def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum amount to partition into
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params)
|
davidhuser/dhis2.py | dhis2/api.py | Api.get | python | def get(self, endpoint, file_type='json', params=None, stream=False):
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream) | GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L242-L251 | [
"def _make_request(self, method, endpoint, **kwargs):\n \"\"\"\n Do the actual request with supplied HTTP method\n :param method: HTTP method\n :param endpoint: DHIS2 API endpoint\n :param kwargs: keyword args\n :return: response if ok, RequestException if not\n \"\"\"\n if isinstance(kwargs... | class Api(object):
"""A Python interface to the DHIS2 API
Example usage:
from dhis2 import Api
api = Api('play.dhis2.org/demo', 'admin', 'district')
"""
def __init__(self, server, username, password, api_version=None, user_agent=None):
"""
:param server: baseurl, e.g. 'play.dhis2.org/demo'
:param username: DHIS2 username
:param password: DHIS2 password
:param api_version: optional, creates a url like /api/29/schemas
:param user_agent: optional, add user-agent to header. otherwise it uses requests' user-agent.
"""
self._base_url, self._api_version, self._info, self._version, self._version_int, self._revision = (None,)*6
self.base_url = server
self.api_version = api_version
self.session = requests.Session()
self.username = username
self.session.auth = (self.username, password)
if user_agent:
self.session.headers['user-agent'] = user_agent
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, server):
if '/api' in server:
raise ClientException("Do not include /api/ in the DHIS2 `server` argument")
server = server.strip()
is_local = 'localhost' in server or '127.0.0.1' in server
has_scheme = '://' in server
# add http / https schemes when missing
if is_local and not has_scheme:
url = 'http://{}'.format(server)
elif not is_local and not has_scheme:
url = 'https://{}'.format(server)
else:
url = server
o = urlparse(url)
self._base_url = urlunparse((o.scheme, o.netloc, o.path, '', '', ''))
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, number):
if number:
try:
i = int(number)
if i < 25:
raise ValueError
except ValueError:
raise ClientException("`api_version` must be 25 or greater: {}".format(number))
else:
self._api_version = i
else:
self._api_version = None
@property
def api_url(self):
if self._api_version:
return '{}/api/{}'.format(self._base_url, self._api_version)
else:
return '{}/api'.format(self._base_url)
@property
def info(self):
if not self._info:
self._info = self.get('system/info').json()
return self._info
@property
def version(self):
return self._version if self._version else self.info['version']
@property
def revision(self):
return self._revision if self._revision else self.info['revision']
@property
def version_int(self):
if not self._version_int:
self._version_int = version_to_int(self.version)
return self._version_int
def __str__(self):
s = "DHIS2 Base URL: '{}'\n" \
"API URL: '{}'\n" \
"Username: '{}'".format(self.base_url, self.api_url, self.username)
return s
@classmethod
def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent)
@staticmethod
def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response
@staticmethod
def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__))
def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r)
def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params)
def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params)
def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params)
def delete(self, endpoint, json=None, params=None, **kwargs):
"""
DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params)
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))}
def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
"""
GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View
"""
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator())
def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum amount to partition into
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params)
|
davidhuser/dhis2.py | dhis2/api.py | Api.post | python | def post(self, endpoint, json=None, params=None, **kwargs):
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params) | POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L253-L261 | [
"def _make_request(self, method, endpoint, **kwargs):\n \"\"\"\n Do the actual request with supplied HTTP method\n :param method: HTTP method\n :param endpoint: DHIS2 API endpoint\n :param kwargs: keyword args\n :return: response if ok, RequestException if not\n \"\"\"\n if isinstance(kwargs... | class Api(object):
"""A Python interface to the DHIS2 API
Example usage:
from dhis2 import Api
api = Api('play.dhis2.org/demo', 'admin', 'district')
"""
def __init__(self, server, username, password, api_version=None, user_agent=None):
"""
:param server: baseurl, e.g. 'play.dhis2.org/demo'
:param username: DHIS2 username
:param password: DHIS2 password
:param api_version: optional, creates a url like /api/29/schemas
:param user_agent: optional, add user-agent to header. otherwise it uses requests' user-agent.
"""
self._base_url, self._api_version, self._info, self._version, self._version_int, self._revision = (None,)*6
self.base_url = server
self.api_version = api_version
self.session = requests.Session()
self.username = username
self.session.auth = (self.username, password)
if user_agent:
self.session.headers['user-agent'] = user_agent
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, server):
if '/api' in server:
raise ClientException("Do not include /api/ in the DHIS2 `server` argument")
server = server.strip()
is_local = 'localhost' in server or '127.0.0.1' in server
has_scheme = '://' in server
# add http / https schemes when missing
if is_local and not has_scheme:
url = 'http://{}'.format(server)
elif not is_local and not has_scheme:
url = 'https://{}'.format(server)
else:
url = server
o = urlparse(url)
self._base_url = urlunparse((o.scheme, o.netloc, o.path, '', '', ''))
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, number):
if number:
try:
i = int(number)
if i < 25:
raise ValueError
except ValueError:
raise ClientException("`api_version` must be 25 or greater: {}".format(number))
else:
self._api_version = i
else:
self._api_version = None
@property
def api_url(self):
if self._api_version:
return '{}/api/{}'.format(self._base_url, self._api_version)
else:
return '{}/api'.format(self._base_url)
@property
def info(self):
if not self._info:
self._info = self.get('system/info').json()
return self._info
@property
def version(self):
return self._version if self._version else self.info['version']
@property
def revision(self):
return self._revision if self._revision else self.info['revision']
@property
def version_int(self):
if not self._version_int:
self._version_int = version_to_int(self.version)
return self._version_int
def __str__(self):
s = "DHIS2 Base URL: '{}'\n" \
"API URL: '{}'\n" \
"Username: '{}'".format(self.base_url, self.api_url, self.username)
return s
@classmethod
def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent)
@staticmethod
def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response
@staticmethod
def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__))
def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r)
def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream)
def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params)
def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params)
def delete(self, endpoint, json=None, params=None, **kwargs):
"""
DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params)
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))}
def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
"""
GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View
"""
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator())
def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum amount to partition into
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params)
|
davidhuser/dhis2.py | dhis2/api.py | Api.put | python | def put(self, endpoint, json=None, params=None, **kwargs):
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params) | PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L263-L272 | [
"def _make_request(self, method, endpoint, **kwargs):\n \"\"\"\n Do the actual request with supplied HTTP method\n :param method: HTTP method\n :param endpoint: DHIS2 API endpoint\n :param kwargs: keyword args\n :return: response if ok, RequestException if not\n \"\"\"\n if isinstance(kwargs... | class Api(object):
"""A Python interface to the DHIS2 API
Example usage:
from dhis2 import Api
api = Api('play.dhis2.org/demo', 'admin', 'district')
"""
def __init__(self, server, username, password, api_version=None, user_agent=None):
"""
:param server: baseurl, e.g. 'play.dhis2.org/demo'
:param username: DHIS2 username
:param password: DHIS2 password
:param api_version: optional, creates a url like /api/29/schemas
:param user_agent: optional, add user-agent to header. otherwise it uses requests' user-agent.
"""
self._base_url, self._api_version, self._info, self._version, self._version_int, self._revision = (None,)*6
self.base_url = server
self.api_version = api_version
self.session = requests.Session()
self.username = username
self.session.auth = (self.username, password)
if user_agent:
self.session.headers['user-agent'] = user_agent
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, server):
if '/api' in server:
raise ClientException("Do not include /api/ in the DHIS2 `server` argument")
server = server.strip()
is_local = 'localhost' in server or '127.0.0.1' in server
has_scheme = '://' in server
# add http / https schemes when missing
if is_local and not has_scheme:
url = 'http://{}'.format(server)
elif not is_local and not has_scheme:
url = 'https://{}'.format(server)
else:
url = server
o = urlparse(url)
self._base_url = urlunparse((o.scheme, o.netloc, o.path, '', '', ''))
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, number):
if number:
try:
i = int(number)
if i < 25:
raise ValueError
except ValueError:
raise ClientException("`api_version` must be 25 or greater: {}".format(number))
else:
self._api_version = i
else:
self._api_version = None
@property
def api_url(self):
if self._api_version:
return '{}/api/{}'.format(self._base_url, self._api_version)
else:
return '{}/api'.format(self._base_url)
@property
def info(self):
if not self._info:
self._info = self.get('system/info').json()
return self._info
@property
def version(self):
return self._version if self._version else self.info['version']
@property
def revision(self):
return self._revision if self._revision else self.info['revision']
@property
def version_int(self):
if not self._version_int:
self._version_int = version_to_int(self.version)
return self._version_int
def __str__(self):
s = "DHIS2 Base URL: '{}'\n" \
"API URL: '{}'\n" \
"Username: '{}'".format(self.base_url, self.api_url, self.username)
return s
@classmethod
def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent)
@staticmethod
def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response
@staticmethod
def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__))
def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r)
def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream)
def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params)
def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params)
def delete(self, endpoint, json=None, params=None, **kwargs):
"""
DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params)
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))}
def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
"""
GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View
"""
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator())
def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum amount to partition into
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params)
|
davidhuser/dhis2.py | dhis2/api.py | Api.patch | python | def patch(self, endpoint, json=None, params=None, **kwargs):
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params) | PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L274-L283 | [
"def _make_request(self, method, endpoint, **kwargs):\n \"\"\"\n Do the actual request with supplied HTTP method\n :param method: HTTP method\n :param endpoint: DHIS2 API endpoint\n :param kwargs: keyword args\n :return: response if ok, RequestException if not\n \"\"\"\n if isinstance(kwargs... | class Api(object):
"""A Python interface to the DHIS2 API
Example usage:
from dhis2 import Api
api = Api('play.dhis2.org/demo', 'admin', 'district')
"""
def __init__(self, server, username, password, api_version=None, user_agent=None):
"""
:param server: baseurl, e.g. 'play.dhis2.org/demo'
:param username: DHIS2 username
:param password: DHIS2 password
:param api_version: optional, creates a url like /api/29/schemas
:param user_agent: optional, add user-agent to header. otherwise it uses requests' user-agent.
"""
self._base_url, self._api_version, self._info, self._version, self._version_int, self._revision = (None,)*6
self.base_url = server
self.api_version = api_version
self.session = requests.Session()
self.username = username
self.session.auth = (self.username, password)
if user_agent:
self.session.headers['user-agent'] = user_agent
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, server):
if '/api' in server:
raise ClientException("Do not include /api/ in the DHIS2 `server` argument")
server = server.strip()
is_local = 'localhost' in server or '127.0.0.1' in server
has_scheme = '://' in server
# add http / https schemes when missing
if is_local and not has_scheme:
url = 'http://{}'.format(server)
elif not is_local and not has_scheme:
url = 'https://{}'.format(server)
else:
url = server
o = urlparse(url)
self._base_url = urlunparse((o.scheme, o.netloc, o.path, '', '', ''))
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, number):
if number:
try:
i = int(number)
if i < 25:
raise ValueError
except ValueError:
raise ClientException("`api_version` must be 25 or greater: {}".format(number))
else:
self._api_version = i
else:
self._api_version = None
@property
def api_url(self):
if self._api_version:
return '{}/api/{}'.format(self._base_url, self._api_version)
else:
return '{}/api'.format(self._base_url)
@property
def info(self):
if not self._info:
self._info = self.get('system/info').json()
return self._info
@property
def version(self):
return self._version if self._version else self.info['version']
@property
def revision(self):
return self._revision if self._revision else self.info['revision']
@property
def version_int(self):
if not self._version_int:
self._version_int = version_to_int(self.version)
return self._version_int
def __str__(self):
s = "DHIS2 Base URL: '{}'\n" \
"API URL: '{}'\n" \
"Username: '{}'".format(self.base_url, self.api_url, self.username)
return s
@classmethod
def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent)
@staticmethod
def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response
@staticmethod
def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__))
def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r)
def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream)
def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params)
def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params)
def delete(self, endpoint, json=None, params=None, **kwargs):
"""
DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params)
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))}
def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
"""
GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View
"""
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator())
def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum amount to partition into
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params)
|
davidhuser/dhis2.py | dhis2/api.py | Api.delete | python | def delete(self, endpoint, json=None, params=None, **kwargs):
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params) | DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L285-L294 | [
"def _make_request(self, method, endpoint, **kwargs):\n \"\"\"\n Do the actual request with supplied HTTP method\n :param method: HTTP method\n :param endpoint: DHIS2 API endpoint\n :param kwargs: keyword args\n :return: response if ok, RequestException if not\n \"\"\"\n if isinstance(kwargs... | class Api(object):
"""A Python interface to the DHIS2 API
Example usage:
from dhis2 import Api
api = Api('play.dhis2.org/demo', 'admin', 'district')
"""
def __init__(self, server, username, password, api_version=None, user_agent=None):
"""
:param server: baseurl, e.g. 'play.dhis2.org/demo'
:param username: DHIS2 username
:param password: DHIS2 password
:param api_version: optional, creates a url like /api/29/schemas
:param user_agent: optional, add user-agent to header. otherwise it uses requests' user-agent.
"""
self._base_url, self._api_version, self._info, self._version, self._version_int, self._revision = (None,)*6
self.base_url = server
self.api_version = api_version
self.session = requests.Session()
self.username = username
self.session.auth = (self.username, password)
if user_agent:
self.session.headers['user-agent'] = user_agent
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, server):
if '/api' in server:
raise ClientException("Do not include /api/ in the DHIS2 `server` argument")
server = server.strip()
is_local = 'localhost' in server or '127.0.0.1' in server
has_scheme = '://' in server
# add http / https schemes when missing
if is_local and not has_scheme:
url = 'http://{}'.format(server)
elif not is_local and not has_scheme:
url = 'https://{}'.format(server)
else:
url = server
o = urlparse(url)
self._base_url = urlunparse((o.scheme, o.netloc, o.path, '', '', ''))
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, number):
if number:
try:
i = int(number)
if i < 25:
raise ValueError
except ValueError:
raise ClientException("`api_version` must be 25 or greater: {}".format(number))
else:
self._api_version = i
else:
self._api_version = None
@property
def api_url(self):
if self._api_version:
return '{}/api/{}'.format(self._base_url, self._api_version)
else:
return '{}/api'.format(self._base_url)
@property
def info(self):
if not self._info:
self._info = self.get('system/info').json()
return self._info
@property
def version(self):
return self._version if self._version else self.info['version']
@property
def revision(self):
return self._revision if self._revision else self.info['revision']
@property
def version_int(self):
if not self._version_int:
self._version_int = version_to_int(self.version)
return self._version_int
def __str__(self):
s = "DHIS2 Base URL: '{}'\n" \
"API URL: '{}'\n" \
"Username: '{}'".format(self.base_url, self.api_url, self.username)
return s
@classmethod
def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent)
@staticmethod
def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response
@staticmethod
def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__))
def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r)
def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream)
def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params)
def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params)
def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params)
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))}
def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
"""
GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View
"""
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator())
def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum amount to partition into
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params)
|
davidhuser/dhis2.py | dhis2/api.py | Api.get_paged | python | def get_paged(self, endpoint, params=None, page_size=50, merge=False):
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))} | GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]} | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L296-L337 | null | class Api(object):
"""A Python interface to the DHIS2 API
Example usage:
from dhis2 import Api
api = Api('play.dhis2.org/demo', 'admin', 'district')
"""
def __init__(self, server, username, password, api_version=None, user_agent=None):
"""
:param server: baseurl, e.g. 'play.dhis2.org/demo'
:param username: DHIS2 username
:param password: DHIS2 password
:param api_version: optional, creates a url like /api/29/schemas
:param user_agent: optional, add user-agent to header. otherwise it uses requests' user-agent.
"""
self._base_url, self._api_version, self._info, self._version, self._version_int, self._revision = (None,)*6
self.base_url = server
self.api_version = api_version
self.session = requests.Session()
self.username = username
self.session.auth = (self.username, password)
if user_agent:
self.session.headers['user-agent'] = user_agent
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, server):
if '/api' in server:
raise ClientException("Do not include /api/ in the DHIS2 `server` argument")
server = server.strip()
is_local = 'localhost' in server or '127.0.0.1' in server
has_scheme = '://' in server
# add http / https schemes when missing
if is_local and not has_scheme:
url = 'http://{}'.format(server)
elif not is_local and not has_scheme:
url = 'https://{}'.format(server)
else:
url = server
o = urlparse(url)
self._base_url = urlunparse((o.scheme, o.netloc, o.path, '', '', ''))
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, number):
if number:
try:
i = int(number)
if i < 25:
raise ValueError
except ValueError:
raise ClientException("`api_version` must be 25 or greater: {}".format(number))
else:
self._api_version = i
else:
self._api_version = None
@property
def api_url(self):
if self._api_version:
return '{}/api/{}'.format(self._base_url, self._api_version)
else:
return '{}/api'.format(self._base_url)
@property
def info(self):
if not self._info:
self._info = self.get('system/info').json()
return self._info
@property
def version(self):
return self._version if self._version else self.info['version']
@property
def revision(self):
return self._revision if self._revision else self.info['revision']
@property
def version_int(self):
if not self._version_int:
self._version_int = version_to_int(self.version)
return self._version_int
def __str__(self):
s = "DHIS2 Base URL: '{}'\n" \
"API URL: '{}'\n" \
"Username: '{}'".format(self.base_url, self.api_url, self.username)
return s
@classmethod
def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent)
@staticmethod
def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response
@staticmethod
def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__))
def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r)
def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream)
def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params)
def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params)
def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params)
def delete(self, endpoint, json=None, params=None, **kwargs):
"""
DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params)
def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
"""
GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View
"""
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator())
def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum amount to partition into
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params)
|
davidhuser/dhis2.py | dhis2/api.py | Api.get_sqlview | python | def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator()) | GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L339-L379 | [
"def get(self, endpoint, file_type='json', params=None, stream=False):\n \"\"\"\n GET from DHIS2\n :param endpoint: DHIS2 API endpoint\n :param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON\n :param params: HTTP parameters\n :param stream: use requests' stream parameter\n :r... | class Api(object):
"""A Python interface to the DHIS2 API
Example usage:
from dhis2 import Api
api = Api('play.dhis2.org/demo', 'admin', 'district')
"""
def __init__(self, server, username, password, api_version=None, user_agent=None):
"""
:param server: baseurl, e.g. 'play.dhis2.org/demo'
:param username: DHIS2 username
:param password: DHIS2 password
:param api_version: optional, creates a url like /api/29/schemas
:param user_agent: optional, add user-agent to header. otherwise it uses requests' user-agent.
"""
self._base_url, self._api_version, self._info, self._version, self._version_int, self._revision = (None,)*6
self.base_url = server
self.api_version = api_version
self.session = requests.Session()
self.username = username
self.session.auth = (self.username, password)
if user_agent:
self.session.headers['user-agent'] = user_agent
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, server):
if '/api' in server:
raise ClientException("Do not include /api/ in the DHIS2 `server` argument")
server = server.strip()
is_local = 'localhost' in server or '127.0.0.1' in server
has_scheme = '://' in server
# add http / https schemes when missing
if is_local and not has_scheme:
url = 'http://{}'.format(server)
elif not is_local and not has_scheme:
url = 'https://{}'.format(server)
else:
url = server
o = urlparse(url)
self._base_url = urlunparse((o.scheme, o.netloc, o.path, '', '', ''))
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, number):
if number:
try:
i = int(number)
if i < 25:
raise ValueError
except ValueError:
raise ClientException("`api_version` must be 25 or greater: {}".format(number))
else:
self._api_version = i
else:
self._api_version = None
@property
def api_url(self):
if self._api_version:
return '{}/api/{}'.format(self._base_url, self._api_version)
else:
return '{}/api'.format(self._base_url)
@property
def info(self):
if not self._info:
self._info = self.get('system/info').json()
return self._info
@property
def version(self):
return self._version if self._version else self.info['version']
@property
def revision(self):
return self._revision if self._revision else self.info['revision']
@property
def version_int(self):
if not self._version_int:
self._version_int = version_to_int(self.version)
return self._version_int
def __str__(self):
s = "DHIS2 Base URL: '{}'\n" \
"API URL: '{}'\n" \
"Username: '{}'".format(self.base_url, self.api_url, self.username)
return s
@classmethod
def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent)
@staticmethod
def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response
@staticmethod
def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__))
def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r)
def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream)
def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params)
def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params)
def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params)
def delete(self, endpoint, json=None, params=None, **kwargs):
"""
DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params)
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))}
def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum amount to partition into
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params)
|
davidhuser/dhis2.py | dhis2/api.py | Api.post_partitioned | python | def post_partitioned(self, endpoint, json, params=None, thresh=1000):
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params) | Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum amount to partition into
:return: generator where __next__ is a requests.Response object | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L381-L407 | null | class Api(object):
"""A Python interface to the DHIS2 API
Example usage:
from dhis2 import Api
api = Api('play.dhis2.org/demo', 'admin', 'district')
"""
def __init__(self, server, username, password, api_version=None, user_agent=None):
"""
:param server: baseurl, e.g. 'play.dhis2.org/demo'
:param username: DHIS2 username
:param password: DHIS2 password
:param api_version: optional, creates a url like /api/29/schemas
:param user_agent: optional, add user-agent to header. otherwise it uses requests' user-agent.
"""
self._base_url, self._api_version, self._info, self._version, self._version_int, self._revision = (None,)*6
self.base_url = server
self.api_version = api_version
self.session = requests.Session()
self.username = username
self.session.auth = (self.username, password)
if user_agent:
self.session.headers['user-agent'] = user_agent
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, server):
if '/api' in server:
raise ClientException("Do not include /api/ in the DHIS2 `server` argument")
server = server.strip()
is_local = 'localhost' in server or '127.0.0.1' in server
has_scheme = '://' in server
# add http / https schemes when missing
if is_local and not has_scheme:
url = 'http://{}'.format(server)
elif not is_local and not has_scheme:
url = 'https://{}'.format(server)
else:
url = server
o = urlparse(url)
self._base_url = urlunparse((o.scheme, o.netloc, o.path, '', '', ''))
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, number):
if number:
try:
i = int(number)
if i < 25:
raise ValueError
except ValueError:
raise ClientException("`api_version` must be 25 or greater: {}".format(number))
else:
self._api_version = i
else:
self._api_version = None
@property
def api_url(self):
if self._api_version:
return '{}/api/{}'.format(self._base_url, self._api_version)
else:
return '{}/api'.format(self._base_url)
@property
def info(self):
if not self._info:
self._info = self.get('system/info').json()
return self._info
@property
def version(self):
return self._version if self._version else self.info['version']
@property
def revision(self):
return self._revision if self._revision else self.info['revision']
@property
def version_int(self):
if not self._version_int:
self._version_int = version_to_int(self.version)
return self._version_int
def __str__(self):
s = "DHIS2 Base URL: '{}'\n" \
"API URL: '{}'\n" \
"Username: '{}'".format(self.base_url, self.api_url, self.username)
return s
@classmethod
def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent)
@staticmethod
def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response
@staticmethod
def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__))
def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r)
def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream)
def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params)
def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params)
def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params)
def delete(self, endpoint, json=None, params=None, **kwargs):
    """DELETE from DHIS2.

    :param endpoint: DHIS2 API endpoint
    :param json: HTTP payload
    :param params: HTTP parameters (dict)
    :return: requests.Response object
    """
    # legacy keyword: a ``data=`` kwarg, when present, wins over ``json=``
    payload = kwargs.get('data', json)
    return self._make_request('delete', endpoint, data=payload, params=params)
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
    """GET with paging (for large payloads).

    :param endpoint: DHIS2 API endpoint
    :param params: HTTP parameters (dict), defaults to None
    :param page_size: how many objects per page (must be >= 1)
    :param merge: if True, fetch every page and return them merged into a
        single response dict; otherwise return a generator of pages.
        Defaults to False.
    :return: generator OR a normal DHIS2 response dict,
        e.g. {"organisationUnits": [...]}
    :raises ClientException: if page_size is invalid or paging is set manually
    """
    try:
        # accept ints and numeric strings; int() raises ValueError for the rest
        if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
            raise ValueError
    except ValueError:
        # message now matches the check above: only values below 1 are rejected
        raise ClientException("page_size must be >= 1")

    params = {} if not params else params  # NOTE: the caller's dict is mutated below
    if 'paging' in params:
        raise ClientException("Can't set paging manually in `params` when using `get_paged`")
    params['pageSize'] = page_size
    params['page'] = 1
    params['totalPages'] = True  # ask the server to report the total page count
    # only use e.g. 'events' when submitting 'events/query' as endpoint
    collection = endpoint.split('/')[0]

    def page_generator():
        """Yield pages until the server-reported pageCount is exhausted."""
        page = self.get(endpoint=endpoint, file_type='json', params=params).json()
        page_count = page['pager']['pageCount']
        yield page
        while page['pager']['page'] < page_count:
            params['page'] += 1
            page = self.get(endpoint=endpoint, file_type='json', params=params).json()
            yield page

    if not merge:
        return page_generator()
    data = [p[collection] for p in page_generator()]
    return {collection: list(chain.from_iterable(data))}
def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
    """GET SQL View data.

    :param uid: sqlView UID
    :param execute: materialize the sqlView before downloading its data
    :param var: for QUERY types, a dict of variables to query the sqlView
    :param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
    :param merge: if True, return a list containing all rows instead of a generator. Defaults to False.
    :return: a list OR generator where __next__ is a 'row' of the SQL View
    :raises ClientException: if var/criteria are not dicts, or execute is
        requested for a QUERY-type view
    """
    params = {}
    # look up the view's type first; it decides which parameters apply below
    sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
    if sqlview_type == 'QUERY':
        if not isinstance(var, dict):
            raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
        # DHIS2 expects query variables as repeated "key:value" strings
        var = ['{}:{}'.format(k, v) for k, v in var.items()]
        params['var'] = var
        if execute:
            # QUERY views are evaluated on the fly; there is nothing to materialize
            raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
    else:  # MATERIALIZED_VIEW / VIEW
        if criteria:
            if not isinstance(criteria, dict):
                raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
            # filter criteria are sent as "column:value" strings
            criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
            params['criteria'] = criteria
        if execute:  # materialize the view server-side before reading its data
            self.post('sqlViews/{}/execute'.format(uid))

    def page_generator():
        """Stream the view's CSV export and yield it row by row as dicts."""
        with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
            # do not need to use unicodecsv.DictReader as data comes in bytes already
            reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
            for row in reader:
                yield row

    if not merge:
        return page_generator()
    else:
        return list(page_generator())
|
davidhuser/dhis2.py | dhis2/utils.py | load_csv | python | def load_csv(path, delimiter=','):
try:
with open(path, 'rb') as csvfile:
reader = DictReader(csvfile, delimiter=delimiter)
for row in reader:
yield row
except (OSError, IOError):
raise ClientException("File not found: {}".format(path)) | Load CSV file from path and yield CSV rows
Usage:
for row in load_csv('/path/to/file'):
print(row)
or
list(load_csv('/path/to/file'))
:param path: file path
:param delimiter: CSV delimiter
:return: a generator where __next__ is a row of the CSV | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L25-L46 | null | # -*- coding: utf-8 -*-
"""
dhis2.utils
~~~~~~~~~~~
This module provides utility functions that are used within dhis2.py
"""
import json
import os
import re
import random
import string
from six import string_types, iteritems
from unicodecsv import DictReader
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.formatters.terminal import TerminalFormatter
from .exceptions import ClientException
def load_json(path):
    """Read and deserialize a JSON file.

    :param path: file path
    :return: a Python object (e.g. a dict)
    :raises ClientException: if the file cannot be opened
    """
    try:
        with open(path, 'r') as fp:
            return json.load(fp)
    except (OSError, IOError):
        raise ClientException("File not found: {}".format(path))
def partition_payload(data, key, thresh):
    """Split the list stored under ``key`` into chunks of at most ``thresh``.

    Example with a threshold of 2::

        { "dataElements": [1, 2, 3] }
        -->
        { "dataElements": [1, 2] }  and  { "dataElements": [3] }

    :param data: the payload dict
    :param key: the key of the dict to partition
    :param thresh: the maximum number of items per chunk
    :return: a generator where __next__ is a partition of the payload
    """
    chunk_source = data[key]
    for offset in range(0, len(chunk_source), thresh):
        yield {key: chunk_source[offset:offset + thresh]}
def search_auth_file(filename='dish.json'):
    """Locate an authentication file.

    Looks in $DHIS_HOME first; if that env variable is unset, walks the
    current user's home folder looking for ``filename``.

    :param filename: the filename to search for
    :return: full path of filename
    :raises ClientException: if the file cannot be found
    """
    dhis_home = os.environ.get('DHIS_HOME')
    if dhis_home is not None:
        return os.path.join(dhis_home, filename)
    for root, _dirs, files in os.walk(os.path.expanduser('~')):
        if filename in files:
            return os.path.join(root, filename)
    raise ClientException("'{}' not found - searched in $DHIS_HOME and in home folder".format(filename))
def version_to_int(value):
    """Extract the minor DHIS2 version number as an integer.

    e.g. "2.28" -> 28, "2.31-RC1" -> 31, "2.30-SNAPSHOT" -> 30.

    :param value: the version received from system/info, e.g. "2.28"
    :return: integer minor version, or None if it couldn't be parsed
    """
    cleaned = value.replace('-SNAPSHOT', '')
    # drop release-candidate suffixes such as "-RC1"
    if '-RC' in cleaned:
        cleaned, _, _ = cleaned.partition('-RC')
    try:
        return int(cleaned.split('.')[1])
    except (ValueError, IndexError):
        return None
def generate_uid():
    """Create a DHIS2 UID matching the regex ^[A-Za-z][A-Za-z0-9]{10}$.

    :return: an 11-character UID string
    """
    letters = string.ascii_letters
    # the first character must be a letter; the remaining ten may be alphanumeric
    uid_chars = [random.choice(letters)]
    uid_chars.extend(random.choice(letters + string.digits) for _ in range(10))
    return ''.join(uid_chars)
def is_valid_uid(uid):
    """Check whether ``uid`` is a syntactically valid DHIS2 UID.

    :param uid: the candidate value (any type accepted)
    :return: True if it is a valid DHIS2 UID, False if not
    """
    if not isinstance(uid, string_types):
        return False
    uid_regex = re.compile(r'^[A-Za-z][A-Za-z0-9]{10}$')
    return bool(uid_regex.match(uid))
def pretty_json(obj):
    """Print JSON with indentation and colours.

    :param obj: the object to print - can be a dict or a JSON string
    :raises ClientException: if ``obj`` is a string but not valid JSON
    """
    if isinstance(obj, string_types):
        try:
            obj = json.loads(obj)
        except ValueError:
            raise ClientException("`obj` is not a json string")
    formatted = json.dumps(obj, sort_keys=True, indent=2)
    print(highlight(formatted, JsonLexer(), TerminalFormatter()))
def clean_obj(obj, remove):
    """Recursively strip the given keys/values from nested dicts and lists.

    Useful e.g. to drop all sharing keys or all 'user' fields from a payload.
    This should result in the same as running in bash:
    ``jq del(.. | .publicAccess?, .userGroupAccesses?)``

    :param obj: the object (dict / list / nested combination) to clean
    :param remove: what to remove - a single string or an iterable of strings
    :return: the cleaned object
    """
    if isinstance(remove, string_types):
        remove = [remove]
    try:
        iter(remove)
    except TypeError:
        raise ClientException("`remove` could not be removed from object: {}".format(repr(remove)))
    else:
        if isinstance(obj, dict):
            # drop unwanted keys; clean each surviving value recursively
            obj = {k: clean_obj(v, remove) for k, v in iteritems(obj) if k not in remove}
        elif isinstance(obj, list):
            # drop unwanted elements; clean each survivor recursively
            obj = [clean_obj(element, remove) for element in obj if element not in remove]
    return obj
|
davidhuser/dhis2.py | dhis2/utils.py | load_json | python | def load_json(path):
try:
with open(path, 'r') as json_file:
return json.load(json_file)
except (OSError, IOError):
raise ClientException("File not found: {}".format(path)) | Load JSON file from path
:param path: file path
:return: A Python object (e.g. a dict) | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L49-L59 | null | # -*- coding: utf-8 -*-
"""
dhis2.utils
~~~~~~~~~~~
This module provides utility functions that are used within dhis2.py
"""
import json
import os
import re
import random
import string
from six import string_types, iteritems
from unicodecsv import DictReader
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.formatters.terminal import TerminalFormatter
from .exceptions import ClientException
def load_csv(path, delimiter=','):
    """Stream rows from a CSV file.

    Usage::

        for row in load_csv('/path/to/file'):
            print(row)

    or ``list(load_csv('/path/to/file'))``.

    :param path: file path
    :param delimiter: CSV delimiter
    :return: a generator where __next__ is a row of the CSV
    :raises ClientException: if the file cannot be opened
    """
    try:
        with open(path, 'rb') as fp:
            for record in DictReader(fp, delimiter=delimiter):
                yield record
    except (OSError, IOError):
        raise ClientException("File not found: {}".format(path))
def partition_payload(data, key, thresh):
"""
Yield partitions of a payload
e.g. with a threshold of 2:
{ "dataElements": [1, 2, 3] }
-->
{ "dataElements": [1, 2] }
and
{ "dataElements": [3] }
:param data: the payload
:param key: the key of the dict to partition
:param thresh: the maximum value of a chunk
:return: a generator where __next__ is a partition of the payload
"""
data = data[key]
for i in range(0, len(data), thresh):
yield {key: data[i:i + thresh]}
def search_auth_file(filename='dish.json'):
"""
Search filename in
- A) DHIS_HOME (env variable)
- B) current user's home folder
:param filename: the filename to search for
:return: full path of filename
"""
if 'DHIS_HOME' in os.environ:
return os.path.join(os.environ['DHIS_HOME'], filename)
else:
home_path = os.path.expanduser(os.path.join('~'))
for root, dirs, files in os.walk(home_path):
if filename in files:
return os.path.join(root, filename)
raise ClientException("'{}' not found - searched in $DHIS_HOME and in home folder".format(filename))
def version_to_int(value):
"""
Convert version info to integer
:param value: the version received from system/info, e.g. "2.28"
:return: integer from version, e.g. 28, None if it couldn't be parsed
"""
# remove '-SNAPSHOT'
value = value.replace('-SNAPSHOT', '')
# remove '-RCx'
if '-RC' in value:
value = value.split('-RC', 1)[0]
try:
return int(value.split('.')[1])
except (ValueError, IndexError):
return
def generate_uid():
"""
Create DHIS2 UID matching to Regex
^[A-Za-z][A-Za-z0-9]{10}$
:return: UID string
"""
# first must be a letter
first = random.choice(string.ascii_letters)
# rest must be letters or numbers
rest = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
return first + rest
def is_valid_uid(uid):
"""
:return: True if it is a valid DHIS2 UID, False if not
"""
pattern = r'^[A-Za-z][A-Za-z0-9]{10}$'
if not isinstance(uid, string_types):
return False
return bool(re.compile(pattern).match(uid))
def pretty_json(obj):
"""
Print JSON with indentation and colours
:param obj: the object to print - can be a dict or a string
"""
if isinstance(obj, string_types):
try:
obj = json.loads(obj)
except ValueError:
raise ClientException("`obj` is not a json string")
json_str = json.dumps(obj, sort_keys=True, indent=2)
print(highlight(json_str, JsonLexer(), TerminalFormatter()))
def clean_obj(obj, remove):
"""
Recursively remove keys from list/dict/dict-of-lists/list-of-keys/nested ...,
e.g. remove all sharing keys or remove all 'user' fields
This should result in the same as if running in bash: `jq del(.. | .publicAccess?, .userGroupAccesses?)`
:param obj: the dict to remove keys from
:param remove: keys to remove - can be a string or iterable
"""
if isinstance(remove, string_types):
remove = [remove]
try:
iter(remove)
except TypeError:
raise ClientException("`remove` could not be removed from object: {}".format(repr(remove)))
else:
if isinstance(obj, dict):
obj = {
key: clean_obj(value, remove)
for key, value in iteritems(obj)
if key not in remove
}
elif isinstance(obj, list):
obj = [
clean_obj(item, remove)
for item in obj
if item not in remove
]
return obj
|
davidhuser/dhis2.py | dhis2/utils.py | partition_payload | python | def partition_payload(data, key, thresh):
data = data[key]
for i in range(0, len(data), thresh):
yield {key: data[i:i + thresh]} | Yield partitions of a payload
e.g. with a threshold of 2:
{ "dataElements": [1, 2, 3] }
-->
{ "dataElements": [1, 2] }
and
{ "dataElements": [3] }
:param data: the payload
:param key: the key of the dict to partition
:param thresh: the maximum value of a chunk
:return: a generator where __next__ is a partition of the payload | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L62-L81 | null | # -*- coding: utf-8 -*-
"""
dhis2.utils
~~~~~~~~~~~
This module provides utility functions that are used within dhis2.py
"""
import json
import os
import re
import random
import string
from six import string_types, iteritems
from unicodecsv import DictReader
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.formatters.terminal import TerminalFormatter
from .exceptions import ClientException
def load_csv(path, delimiter=','):
"""
Load CSV file from path and yield CSV rows
Usage:
for row in load_csv('/path/to/file'):
print(row)
or
list(load_csv('/path/to/file'))
:param path: file path
:param delimiter: CSV delimiter
:return: a generator where __next__ is a row of the CSV
"""
try:
with open(path, 'rb') as csvfile:
reader = DictReader(csvfile, delimiter=delimiter)
for row in reader:
yield row
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def load_json(path):
"""
Load JSON file from path
:param path: file path
:return: A Python object (e.g. a dict)
"""
try:
with open(path, 'r') as json_file:
return json.load(json_file)
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def search_auth_file(filename='dish.json'):
"""
Search filename in
- A) DHIS_HOME (env variable)
- B) current user's home folder
:param filename: the filename to search for
:return: full path of filename
"""
if 'DHIS_HOME' in os.environ:
return os.path.join(os.environ['DHIS_HOME'], filename)
else:
home_path = os.path.expanduser(os.path.join('~'))
for root, dirs, files in os.walk(home_path):
if filename in files:
return os.path.join(root, filename)
raise ClientException("'{}' not found - searched in $DHIS_HOME and in home folder".format(filename))
def version_to_int(value):
"""
Convert version info to integer
:param value: the version received from system/info, e.g. "2.28"
:return: integer from version, e.g. 28, None if it couldn't be parsed
"""
# remove '-SNAPSHOT'
value = value.replace('-SNAPSHOT', '')
# remove '-RCx'
if '-RC' in value:
value = value.split('-RC', 1)[0]
try:
return int(value.split('.')[1])
except (ValueError, IndexError):
return
def generate_uid():
"""
Create DHIS2 UID matching to Regex
^[A-Za-z][A-Za-z0-9]{10}$
:return: UID string
"""
# first must be a letter
first = random.choice(string.ascii_letters)
# rest must be letters or numbers
rest = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
return first + rest
def is_valid_uid(uid):
"""
:return: True if it is a valid DHIS2 UID, False if not
"""
pattern = r'^[A-Za-z][A-Za-z0-9]{10}$'
if not isinstance(uid, string_types):
return False
return bool(re.compile(pattern).match(uid))
def pretty_json(obj):
"""
Print JSON with indentation and colours
:param obj: the object to print - can be a dict or a string
"""
if isinstance(obj, string_types):
try:
obj = json.loads(obj)
except ValueError:
raise ClientException("`obj` is not a json string")
json_str = json.dumps(obj, sort_keys=True, indent=2)
print(highlight(json_str, JsonLexer(), TerminalFormatter()))
def clean_obj(obj, remove):
"""
Recursively remove keys from list/dict/dict-of-lists/list-of-keys/nested ...,
e.g. remove all sharing keys or remove all 'user' fields
This should result in the same as if running in bash: `jq del(.. | .publicAccess?, .userGroupAccesses?)`
:param obj: the dict to remove keys from
:param remove: keys to remove - can be a string or iterable
"""
if isinstance(remove, string_types):
remove = [remove]
try:
iter(remove)
except TypeError:
raise ClientException("`remove` could not be removed from object: {}".format(repr(remove)))
else:
if isinstance(obj, dict):
obj = {
key: clean_obj(value, remove)
for key, value in iteritems(obj)
if key not in remove
}
elif isinstance(obj, list):
obj = [
clean_obj(item, remove)
for item in obj
if item not in remove
]
return obj
|
davidhuser/dhis2.py | dhis2/utils.py | search_auth_file | python | def search_auth_file(filename='dish.json'):
if 'DHIS_HOME' in os.environ:
return os.path.join(os.environ['DHIS_HOME'], filename)
else:
home_path = os.path.expanduser(os.path.join('~'))
for root, dirs, files in os.walk(home_path):
if filename in files:
return os.path.join(root, filename)
raise ClientException("'{}' not found - searched in $DHIS_HOME and in home folder".format(filename)) | Search filename in
- A) DHIS_HOME (env variable)
- B) current user's home folder
:param filename: the filename to search for
:return: full path of filename | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L84-L99 | null | # -*- coding: utf-8 -*-
"""
dhis2.utils
~~~~~~~~~~~
This module provides utility functions that are used within dhis2.py
"""
import json
import os
import re
import random
import string
from six import string_types, iteritems
from unicodecsv import DictReader
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.formatters.terminal import TerminalFormatter
from .exceptions import ClientException
def load_csv(path, delimiter=','):
"""
Load CSV file from path and yield CSV rows
Usage:
for row in load_csv('/path/to/file'):
print(row)
or
list(load_csv('/path/to/file'))
:param path: file path
:param delimiter: CSV delimiter
:return: a generator where __next__ is a row of the CSV
"""
try:
with open(path, 'rb') as csvfile:
reader = DictReader(csvfile, delimiter=delimiter)
for row in reader:
yield row
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def load_json(path):
"""
Load JSON file from path
:param path: file path
:return: A Python object (e.g. a dict)
"""
try:
with open(path, 'r') as json_file:
return json.load(json_file)
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def partition_payload(data, key, thresh):
"""
Yield partitions of a payload
e.g. with a threshold of 2:
{ "dataElements": [1, 2, 3] }
-->
{ "dataElements": [1, 2] }
and
{ "dataElements": [3] }
:param data: the payload
:param key: the key of the dict to partition
:param thresh: the maximum value of a chunk
:return: a generator where __next__ is a partition of the payload
"""
data = data[key]
for i in range(0, len(data), thresh):
yield {key: data[i:i + thresh]}
def version_to_int(value):
"""
Convert version info to integer
:param value: the version received from system/info, e.g. "2.28"
:return: integer from version, e.g. 28, None if it couldn't be parsed
"""
# remove '-SNAPSHOT'
value = value.replace('-SNAPSHOT', '')
# remove '-RCx'
if '-RC' in value:
value = value.split('-RC', 1)[0]
try:
return int(value.split('.')[1])
except (ValueError, IndexError):
return
def generate_uid():
"""
Create DHIS2 UID matching to Regex
^[A-Za-z][A-Za-z0-9]{10}$
:return: UID string
"""
# first must be a letter
first = random.choice(string.ascii_letters)
# rest must be letters or numbers
rest = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
return first + rest
def is_valid_uid(uid):
"""
:return: True if it is a valid DHIS2 UID, False if not
"""
pattern = r'^[A-Za-z][A-Za-z0-9]{10}$'
if not isinstance(uid, string_types):
return False
return bool(re.compile(pattern).match(uid))
def pretty_json(obj):
"""
Print JSON with indentation and colours
:param obj: the object to print - can be a dict or a string
"""
if isinstance(obj, string_types):
try:
obj = json.loads(obj)
except ValueError:
raise ClientException("`obj` is not a json string")
json_str = json.dumps(obj, sort_keys=True, indent=2)
print(highlight(json_str, JsonLexer(), TerminalFormatter()))
def clean_obj(obj, remove):
"""
Recursively remove keys from list/dict/dict-of-lists/list-of-keys/nested ...,
e.g. remove all sharing keys or remove all 'user' fields
This should result in the same as if running in bash: `jq del(.. | .publicAccess?, .userGroupAccesses?)`
:param obj: the dict to remove keys from
:param remove: keys to remove - can be a string or iterable
"""
if isinstance(remove, string_types):
remove = [remove]
try:
iter(remove)
except TypeError:
raise ClientException("`remove` could not be removed from object: {}".format(repr(remove)))
else:
if isinstance(obj, dict):
obj = {
key: clean_obj(value, remove)
for key, value in iteritems(obj)
if key not in remove
}
elif isinstance(obj, list):
obj = [
clean_obj(item, remove)
for item in obj
if item not in remove
]
return obj
|
davidhuser/dhis2.py | dhis2/utils.py | version_to_int | python | def version_to_int(value):
# remove '-SNAPSHOT'
value = value.replace('-SNAPSHOT', '')
# remove '-RCx'
if '-RC' in value:
value = value.split('-RC', 1)[0]
try:
return int(value.split('.')[1])
except (ValueError, IndexError):
return | Convert version info to integer
:param value: the version received from system/info, e.g. "2.28"
:return: integer from version, e.g. 28, None if it couldn't be parsed | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L102-L116 | null | # -*- coding: utf-8 -*-
"""
dhis2.utils
~~~~~~~~~~~
This module provides utility functions that are used within dhis2.py
"""
import json
import os
import re
import random
import string
from six import string_types, iteritems
from unicodecsv import DictReader
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.formatters.terminal import TerminalFormatter
from .exceptions import ClientException
def load_csv(path, delimiter=','):
"""
Load CSV file from path and yield CSV rows
Usage:
for row in load_csv('/path/to/file'):
print(row)
or
list(load_csv('/path/to/file'))
:param path: file path
:param delimiter: CSV delimiter
:return: a generator where __next__ is a row of the CSV
"""
try:
with open(path, 'rb') as csvfile:
reader = DictReader(csvfile, delimiter=delimiter)
for row in reader:
yield row
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def load_json(path):
"""
Load JSON file from path
:param path: file path
:return: A Python object (e.g. a dict)
"""
try:
with open(path, 'r') as json_file:
return json.load(json_file)
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def partition_payload(data, key, thresh):
"""
Yield partitions of a payload
e.g. with a threshold of 2:
{ "dataElements": [1, 2, 3] }
-->
{ "dataElements": [1, 2] }
and
{ "dataElements": [3] }
:param data: the payload
:param key: the key of the dict to partition
:param thresh: the maximum value of a chunk
:return: a generator where __next__ is a partition of the payload
"""
data = data[key]
for i in range(0, len(data), thresh):
yield {key: data[i:i + thresh]}
def search_auth_file(filename='dish.json'):
"""
Search filename in
- A) DHIS_HOME (env variable)
- B) current user's home folder
:param filename: the filename to search for
:return: full path of filename
"""
if 'DHIS_HOME' in os.environ:
return os.path.join(os.environ['DHIS_HOME'], filename)
else:
home_path = os.path.expanduser(os.path.join('~'))
for root, dirs, files in os.walk(home_path):
if filename in files:
return os.path.join(root, filename)
raise ClientException("'{}' not found - searched in $DHIS_HOME and in home folder".format(filename))
def generate_uid():
"""
Create DHIS2 UID matching to Regex
^[A-Za-z][A-Za-z0-9]{10}$
:return: UID string
"""
# first must be a letter
first = random.choice(string.ascii_letters)
# rest must be letters or numbers
rest = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
return first + rest
def is_valid_uid(uid):
"""
:return: True if it is a valid DHIS2 UID, False if not
"""
pattern = r'^[A-Za-z][A-Za-z0-9]{10}$'
if not isinstance(uid, string_types):
return False
return bool(re.compile(pattern).match(uid))
def pretty_json(obj):
"""
Print JSON with indentation and colours
:param obj: the object to print - can be a dict or a string
"""
if isinstance(obj, string_types):
try:
obj = json.loads(obj)
except ValueError:
raise ClientException("`obj` is not a json string")
json_str = json.dumps(obj, sort_keys=True, indent=2)
print(highlight(json_str, JsonLexer(), TerminalFormatter()))
def clean_obj(obj, remove):
"""
Recursively remove keys from list/dict/dict-of-lists/list-of-keys/nested ...,
e.g. remove all sharing keys or remove all 'user' fields
This should result in the same as if running in bash: `jq del(.. | .publicAccess?, .userGroupAccesses?)`
:param obj: the dict to remove keys from
:param remove: keys to remove - can be a string or iterable
"""
if isinstance(remove, string_types):
remove = [remove]
try:
iter(remove)
except TypeError:
raise ClientException("`remove` could not be removed from object: {}".format(repr(remove)))
else:
if isinstance(obj, dict):
obj = {
key: clean_obj(value, remove)
for key, value in iteritems(obj)
if key not in remove
}
elif isinstance(obj, list):
obj = [
clean_obj(item, remove)
for item in obj
if item not in remove
]
return obj
|
davidhuser/dhis2.py | dhis2/utils.py | generate_uid | python | def generate_uid():
# first must be a letter
first = random.choice(string.ascii_letters)
# rest must be letters or numbers
rest = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
return first + rest | Create DHIS2 UID matching to Regex
^[A-Za-z][A-Za-z0-9]{10}$
:return: UID string | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L119-L129 | null | # -*- coding: utf-8 -*-
"""
dhis2.utils
~~~~~~~~~~~
This module provides utility functions that are used within dhis2.py
"""
import json
import os
import re
import random
import string
from six import string_types, iteritems
from unicodecsv import DictReader
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.formatters.terminal import TerminalFormatter
from .exceptions import ClientException
def load_csv(path, delimiter=','):
"""
Load CSV file from path and yield CSV rows
Usage:
for row in load_csv('/path/to/file'):
print(row)
or
list(load_csv('/path/to/file'))
:param path: file path
:param delimiter: CSV delimiter
:return: a generator where __next__ is a row of the CSV
"""
try:
with open(path, 'rb') as csvfile:
reader = DictReader(csvfile, delimiter=delimiter)
for row in reader:
yield row
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def load_json(path):
"""
Load JSON file from path
:param path: file path
:return: A Python object (e.g. a dict)
"""
try:
with open(path, 'r') as json_file:
return json.load(json_file)
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def partition_payload(data, key, thresh):
"""
Yield partitions of a payload
e.g. with a threshold of 2:
{ "dataElements": [1, 2, 3] }
-->
{ "dataElements": [1, 2] }
and
{ "dataElements": [3] }
:param data: the payload
:param key: the key of the dict to partition
:param thresh: the maximum value of a chunk
:return: a generator where __next__ is a partition of the payload
"""
data = data[key]
for i in range(0, len(data), thresh):
yield {key: data[i:i + thresh]}
def search_auth_file(filename='dish.json'):
"""
Search filename in
- A) DHIS_HOME (env variable)
- B) current user's home folder
:param filename: the filename to search for
:return: full path of filename
"""
if 'DHIS_HOME' in os.environ:
return os.path.join(os.environ['DHIS_HOME'], filename)
else:
home_path = os.path.expanduser(os.path.join('~'))
for root, dirs, files in os.walk(home_path):
if filename in files:
return os.path.join(root, filename)
raise ClientException("'{}' not found - searched in $DHIS_HOME and in home folder".format(filename))
def version_to_int(value):
"""
Convert version info to integer
:param value: the version received from system/info, e.g. "2.28"
:return: integer from version, e.g. 28, None if it couldn't be parsed
"""
# remove '-SNAPSHOT'
value = value.replace('-SNAPSHOT', '')
# remove '-RCx'
if '-RC' in value:
value = value.split('-RC', 1)[0]
try:
return int(value.split('.')[1])
except (ValueError, IndexError):
return
def is_valid_uid(uid):
"""
:return: True if it is a valid DHIS2 UID, False if not
"""
pattern = r'^[A-Za-z][A-Za-z0-9]{10}$'
if not isinstance(uid, string_types):
return False
return bool(re.compile(pattern).match(uid))
def pretty_json(obj):
"""
Print JSON with indentation and colours
:param obj: the object to print - can be a dict or a string
"""
if isinstance(obj, string_types):
try:
obj = json.loads(obj)
except ValueError:
raise ClientException("`obj` is not a json string")
json_str = json.dumps(obj, sort_keys=True, indent=2)
print(highlight(json_str, JsonLexer(), TerminalFormatter()))
def clean_obj(obj, remove):
"""
Recursively remove keys from list/dict/dict-of-lists/list-of-keys/nested ...,
e.g. remove all sharing keys or remove all 'user' fields
This should result in the same as if running in bash: `jq del(.. | .publicAccess?, .userGroupAccesses?)`
:param obj: the dict to remove keys from
:param remove: keys to remove - can be a string or iterable
"""
if isinstance(remove, string_types):
remove = [remove]
try:
iter(remove)
except TypeError:
raise ClientException("`remove` could not be removed from object: {}".format(repr(remove)))
else:
if isinstance(obj, dict):
obj = {
key: clean_obj(value, remove)
for key, value in iteritems(obj)
if key not in remove
}
elif isinstance(obj, list):
obj = [
clean_obj(item, remove)
for item in obj
if item not in remove
]
return obj
|
davidhuser/dhis2.py | dhis2/utils.py | is_valid_uid | python | def is_valid_uid(uid):
pattern = r'^[A-Za-z][A-Za-z0-9]{10}$'
if not isinstance(uid, string_types):
return False
return bool(re.compile(pattern).match(uid)) | :return: True if it is a valid DHIS2 UID, False if not | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L132-L139 | null | # -*- coding: utf-8 -*-
"""
dhis2.utils
~~~~~~~~~~~
This module provides utility functions that are used within dhis2.py
"""
import json
import os
import re
import random
import string
from six import string_types, iteritems
from unicodecsv import DictReader
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.formatters.terminal import TerminalFormatter
from .exceptions import ClientException
def load_csv(path, delimiter=','):
"""
Load CSV file from path and yield CSV rows
Usage:
for row in load_csv('/path/to/file'):
print(row)
or
list(load_csv('/path/to/file'))
:param path: file path
:param delimiter: CSV delimiter
:return: a generator where __next__ is a row of the CSV
"""
try:
with open(path, 'rb') as csvfile:
reader = DictReader(csvfile, delimiter=delimiter)
for row in reader:
yield row
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def load_json(path):
"""
Load JSON file from path
:param path: file path
:return: A Python object (e.g. a dict)
"""
try:
with open(path, 'r') as json_file:
return json.load(json_file)
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def partition_payload(data, key, thresh):
    """
    Split one payload into several smaller payloads.

    e.g. with a threshold of 2:

    { "dataElements": [1, 2, 3] }

    -->

    { "dataElements": [1, 2] }

    and

    { "dataElements": [3] }

    :param data: the payload
    :param key: the key of the dict to partition
    :param thresh: the maximum size of one chunk
    :return: a generator where __next__ is a partition of the payload
    """
    items = data[key]
    # Slice the list into consecutive chunks of at most `thresh` elements.
    for start in range(0, len(items), thresh):
        yield {key: items[start:start + thresh]}
def search_auth_file(filename='dish.json'):
    """
    Locate an authentication file.

    Looks in this order:

    - A) the folder named by the DHIS_HOME environment variable
    - B) anywhere under the current user's home folder

    :param filename: the filename to search for
    :return: full path of filename
    :raises ClientException: if the file is found in neither location
    """
    # DHIS_HOME wins unconditionally; no existence check is performed here,
    # matching the contract of simply building the expected path.
    if 'DHIS_HOME' in os.environ:
        return os.path.join(os.environ['DHIS_HOME'], filename)
    home = os.path.expanduser(os.path.join('~'))
    for root, _dirs, files in os.walk(home):
        if filename in files:
            return os.path.join(root, filename)
    raise ClientException("'{}' not found - searched in $DHIS_HOME and in home folder".format(filename))
def version_to_int(value):
    """
    Extract the minor version from a DHIS2 version string as an integer.

    :param value: the version received from system/info, e.g. "2.28"
    :return: integer from version, e.g. 28, None if it couldn't be parsed
    """
    # Strip build qualifiers like '-SNAPSHOT' and '-RC1' before parsing.
    cleaned = value.replace('-SNAPSHOT', '')
    if '-RC' in cleaned:
        cleaned = cleaned.split('-RC', 1)[0]
    try:
        return int(cleaned.split('.')[1])
    except (ValueError, IndexError):
        # No '.' present or non-numeric minor part.
        return None
def generate_uid():
    """
    Create a random DHIS2 UID matching the regex

    ^[A-Za-z][A-Za-z0-9]{10}$

    :return: UID string
    """
    letters = string.ascii_letters
    alphanumerics = letters + string.digits
    # The first character must be a letter; the remaining ten may also be digits.
    chars = [random.choice(letters)]
    chars.extend(random.choice(alphanumerics) for _ in range(10))
    return ''.join(chars)
def pretty_json(obj):
    """
    Print JSON to the terminal with indentation and syntax colouring.

    :param obj: the object to print - can be a dict or a JSON string
    :raises ClientException: if a string argument is not valid JSON
    """
    if isinstance(obj, string_types):
        # A string argument must itself be parseable JSON.
        try:
            obj = json.loads(obj)
        except ValueError:
            raise ClientException("`obj` is not a json string")
    formatted = json.dumps(obj, sort_keys=True, indent=2)
    print(highlight(formatted, JsonLexer(), TerminalFormatter()))
def clean_obj(obj, remove):
    """
    Recursively remove keys/values from nested lists and dicts,
    e.g. strip all sharing keys or all 'user' fields.

    This should result in the same as running in bash:
    `jq del(.. | .publicAccess?, .userGroupAccesses?)`

    :param obj: the object (dict/list, possibly nested) to clean
    :param remove: keys to remove - can be a string or iterable
    :return: the cleaned object
    :raises ClientException: if `remove` is neither a string nor iterable
    """
    # Normalize a single key into a one-element list.
    if isinstance(remove, string_types):
        remove = [remove]
    try:
        iter(remove)
    except TypeError:
        raise ClientException("`remove` could not be removed from object: {}".format(repr(remove)))
    if isinstance(obj, dict):
        # Drop blacklisted keys and recurse into the remaining values.
        return {
            key: clean_obj(value, remove)
            for key, value in iteritems(obj)
            if key not in remove
        }
    if isinstance(obj, list):
        # Drop blacklisted items and recurse into the survivors.
        return [
            clean_obj(entry, remove)
            for entry in obj
            if entry not in remove
        ]
    # Scalars (and anything else) pass through unchanged.
    return obj
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.