text stringlengths 81 112k |
|---|
Do a basic render of the text.
def render(self, display):
    """Draw the text onto *display*, re-rendering only when the text changed."""
    # to handle changing objects / callable
    if self.text != self._last_text:
        self._render()
    # NOTE(review): blit's dest is normally a position; passing (topleft, size)
    # relies on pygame reading only the first two values — confirm intended.
    display.blit(self._surface, (self.topleft, self.size))
The position of the cursor in the text.
def cursor(self):
    """The position of the cursor in the text, clamped to [0, len(self)].

    Reading the property repairs an out-of-range ``_cursor`` by assigning
    through the ``cursor`` setter (defined elsewhere) before returning.
    """
    if self._cursor < 0:
        self.cursor = 0
    if self._cursor > len(self):
        self.cursor = len(self)
    return self._cursor
Move the cursor one letter to the right (1) or to the left (-1).
def move_cursor_one_letter(self, letter=RIGHT):
    """Move the cursor one letter to the right (RIGHT) or to the left (LEFT)."""
    assert letter in (self.RIGHT, self.LEFT)
    if letter == self.RIGHT:
        self.cursor += 1
        # undo the move if it went past the end of the text
        if self.cursor > len(self.text):
            self.cursor -= 1
    else:
        self.cursor -= 1
        # undo the move if it went before the start of the text
        if self.cursor < 0:
            self.cursor += 1
Move the cursor one word to the right (1) or to the left (-1).
def move_cursor_one_word(self, word=LEFT):
    """Move the cursor one word to the right (RIGHT) or to the left (LEFT)."""
    assert word in (self.RIGHT, self.LEFT)
    if word == self.RIGHT:
        # jump just past the next space; find() == -1 makes papy 0 (falsy),
        # which means "no more spaces": go to the end of the text
        papy = self.text.find(' ', self.cursor) + 1
        if not papy:
            papy = len(self)
        self.cursor = papy
    else:
        # jump to the previous space, or to the start when there is none
        papy = self.text.rfind(' ', 0, self.cursor)
        if papy == -1:
            papy = 0
        self.cursor = papy
Delete one letter to the right or to the left of the cursor.
def delete_one_letter(self, letter=RIGHT):
    """Delete one letter to the right or to the left of the cursor."""
    assert letter in (self.RIGHT, self.LEFT)
    if letter == self.LEFT:
        # save the cursor first: reassigning self.text may reset/clamp it
        papy = self.cursor
        self.text = self.text[:self.cursor - 1] + self.text[self.cursor:]
        self.cursor = papy - 1
    else:
        # deleting to the right leaves the cursor position unchanged
        self.text = self.text[:self.cursor] + self.text[self.cursor + 1:]
Delete one word to the right or to the left of the cursor.
def delete_one_word(self, word=RIGHT):
    """Delete one word to the right or to the left of the cursor."""
    assert word in (self.RIGHT, self.LEFT)
    if word == self.RIGHT:
        # delete up to (and including) the next space; 0 means "no space
        # found", so delete to the end of the text
        papy = self.text.find(' ', self.cursor) + 1
        if not papy:
            papy = len(self.text)
        self.text = self.text[:self.cursor] + self.text[papy:]
    else:
        # delete back to the previous space, or to the start when none
        papy = self.text.rfind(' ', 0, self.cursor)
        if papy == -1:
            papy = 0
        self.text = self.text[:papy] + self.text[self.cursor:]
        self.cursor = papy
Add a letter at the cursor pos.
def add_letter(self, letter):
    """Insert a single character at the cursor position and advance the cursor.

    :param letter: a one-character string.
    """
    assert isinstance(letter, str)
    assert len(letter) == 1
    self.text = self.text[:self.cursor] + letter + self.text[self.cursor:]
    self.cursor += 1
Update the text and position of cursor according to the event passed.
def update(self, event_or_list):
    """Update the text and cursor position according to the event(s) passed.

    Handles arrow keys (ctrl for word-wise moves), backspace/delete
    (ctrl for word-wise deletion) and printable characters.
    """
    event_or_list = super().update(event_or_list)
    for e in event_or_list:
        if e.type == KEYDOWN:
            if e.key == K_RIGHT:
                # BUG FIX: was `e.mod * KMOD_CTRL`, which is truthy whenever
                # ANY modifier is held (shift, alt...); use the same bitmask
                # test as the backspace/delete branches below.
                if e.mod & KMOD_CTRL:
                    self.move_cursor_one_word(self.RIGHT)
                else:
                    self.move_cursor_one_letter(self.RIGHT)
            elif e.key == K_LEFT:
                # BUG FIX: same `*` -> `&` bitmask fix as above.
                if e.mod & KMOD_CTRL:
                    self.move_cursor_one_word(self.LEFT)
                else:
                    self.move_cursor_one_letter(self.LEFT)
            elif e.key == K_BACKSPACE:
                if self.cursor == 0:
                    continue
                if e.mod & KMOD_CTRL:
                    self.delete_one_word(self.LEFT)
                else:
                    self.delete_one_letter(self.LEFT)
            elif e.key == K_DELETE:
                if e.mod & KMOD_CTRL:
                    self.delete_one_word(self.RIGHT)
                else:
                    self.delete_one_letter(self.RIGHT)
            elif e.unicode != '' and e.unicode.isprintable():
                self.add_letter(e.unicode)
Render the text.
Avoid calling this function too often, as rendering text and blitting it is slow.
def _render(self):
    """
    Render the text to the internal surface.

    Avoid calling this too often: rendering text and blitting it is slow.
    """
    # remember what was rendered so render() can skip redundant work
    self._last_text = self.text
    self._surface = self.font.render(self.text, True, self.color, self.bg_color)
    # keep the configured width but adopt the rendered height
    size = self.width, self._surface.get_height()
    self.size = size
The text displayed instead of the real one.
def shawn_text(self):
    """The obfuscated text displayed instead of the real one.

    With the DOTS style every character becomes a bullet; otherwise each
    character is replaced by a random glyph drawn from a few code-point
    ranges. The random string is cached and reused as long as its length
    matches the real text's length.
    """
    if len(self._shawn_text) == len(self):
        return self._shawn_text
    if self.style == self.DOTS:
        return chr(0x2022) * len(self)
    # (start, end) code-point ranges replacement glyphs are sampled from
    ranges = [
        (902, 1366),
        (192, 683),
        (33, 122)
    ]
    s = ''
    while len(s) < len(self.text):
        # rejection-sample a code point until it falls inside a range
        apolo = randint(33, 1366)
        for a, b in ranges:
            if a <= apolo <= b:
                s += chr(apolo)
                break
    self._shawn_text = s
    return s
The cursor position in pixels.
def cursor_pos(self):
    """The cursor x position in pixels (display coordinates)."""
    if len(self) == 0:
        # empty text: place the cursor right after the placeholder text
        return self.left + self.default_text.get_width()
    papy = self._surface.get_width()
    # when the rendered text is wider than the widget only its right part
    # is shown (see render()), so shift the cursor by the hidden amount
    if papy > self.w:
        shift = papy - self.width
    else:
        shift = 0
    return self.left + self.font.size(self.shawn_text[:self.cursor])[0] - shift
Render the text.
Avoid calling this function too often, as rendering text and blitting it is slow.
def _render(self):
    """
    Render the obfuscated text to the internal surface.

    Avoid calling this too often: rendering text and blitting it is slow.
    """
    # cache key for render(): re-render only when shawn_text changes
    self._last_text = self.shawn_text
    self._surface = self.font.render(self.shawn_text, True, self.color, self.bg_color)
    # NOTE(review): uses self.w here but self.width in the other _render —
    # confirm both names refer to the same value
    size = self.w, self._surface.get_height()
    self.size = size
Do a basic render of the text.
def render(self, display):
    """Draw the (obfuscated) text or the placeholder, plus the cursor when focused."""
    # to handle changing objects / callable
    if self.shawn_text != self._last_text:
        self._render()
    if self.text:
        papy = self._surface.get_width()
        if papy <= self.width:
            display.blit(self._surface, (self.topleft, self.size))
        else:
            # text wider than the widget: blit only its rightmost part
            display.blit(self._surface, (self.topleft, self.size), ((papy - self.w, 0), self.size))
    else:
        # no text entered: show the placeholder surface
        display.blit(self.default_text, (self.topleft, self.size))
    if self._focus:
        # draw the vertical cursor line when the widget has focus
        groom = self.cursor_pos()
        line(display, (groom, self.top), (groom, self.bottom), CONCRETE)
Return a pygame image from a latex template.
def latex_to_img(tex):
    """Return a pygame image rendered from a LaTeX snippet.

    Writes *tex* into a temporary directory, runs ``latex`` then ``dvipng``
    on it, and loads the resulting PNG. Both programs must be on the PATH.

    :param tex: a complete LaTeX document as a string.
    """
    with tempfile.TemporaryDirectory() as tmpdirname:
        # BUG FIX: paths were built with hard-coded Windows separators
        # (r'\tex.tex'), breaking on every other OS; join them portably.
        tex_source = os.path.join(tmpdirname, 'tex.tex')
        dvi_path = os.path.join(tmpdirname, 'tex.dvi')
        png_path = os.path.join(tmpdirname, 'tex.png')
        with open(tex_source, 'w') as f:
            f.write(tex)
        os.system("latex {0} -halt-on-error -interaction=batchmode -disable-installer "
                  "-aux-directory={1} -output-directory={1}".format(tex_source, tmpdirname))
        os.system("dvipng -T tight -z 9 --truecolor -o {0} {1}".format(png_path, dvi_path))
        image = pygame.image.load(png_path)
        return image
Return the mix of two colors at a state of :pos:
Returns color1 * pos + color2 * (1 - pos)
def mix(color1, color2, pos=0.5):
    """
    Blend two RGB colors channel-wise.

    Returns ``color1 * pos + color2 * (1 - pos)``, truncated to ints.

    :param pos: blending weight of ``color1``, between 0 and 1.
    """
    weight = 1 - pos
    return tuple(
        int(a * pos + b * weight)
        for a, b in zip(color1[:3], color2[:3])
    )
Convert the name of a color into its RGB value
def name2rgb(name):
    """Convert a color name (e.g. ``"red"``) into its RGB tuple of 0-255 ints.

    Requires the third-party ``colour`` package.

    :raises ImportError: when ``colour`` is not installed.
    """
    try:
        import colour
    except ImportError as err:
        # chain the original exception so the real import failure stays visible
        raise ImportError('You need colour to be installed: pip install colour') from err
    c = colour.Color(name)
    color = int(c.red * 255), int(c.green * 255), int(c.blue * 255)
    return color
Parse the command man page.
def parse_page(page):
    """Parse a tldr man page file and return its lines colorized for display.

    :param page: path to the page file (utf-8 tldr markdown format).
    :return: list of ``click.style``-wrapped strings.
    """
    colors = get_config()['colors']
    with io.open(page, encoding='utf-8') as f:
        lines = f.readlines()
    output_lines = []
    # skip the first line (the command-name headline)
    for line in lines[1:]:
        if is_headline(line):
            continue
        elif is_description(line):
            output_lines.append(click.style(line.replace('>', ' '),
                                            fg=colors['description']))
        elif is_old_usage(line):
            output_lines.append(click.style(line, fg=colors['usage']))
        elif is_code_example(line):
            # indent fenced lines, strip the two-char marker otherwise
            line = ' ' + line if line.startswith('`') else line[2:]
            output_lines.append(click.style(line.replace('`', ''),
                                            fg=colors['command']))
        elif is_line_break(line):
            output_lines.append(click.style(line))
        else:
            output_lines.append(click.style('- ' + line, fg=colors['usage']))
    return output_lines
Configure the module logging engine.
def configure_logging(level=logging.DEBUG):
    """Configure and return a logger for the module.

    With ``logging.DEBUG`` the root logger is configured so every module
    logs; otherwise a module logger with its own stream handler is built.

    :param level: a ``logging`` level constant.
    :return: a :class:`logging.Logger` instance.
    """
    log_format = '%(asctime)s - %(levelname)s - %(message)s'
    if level == logging.DEBUG:
        # For debugging purposes, log from everyone!
        logging.basicConfig(level=logging.DEBUG, format=log_format)
        # BUG FIX: the original returned the `logging` MODULE here but a
        # Logger below — give callers a consistent Logger in both branches.
        return logging.getLogger(__name__)
    logger = logging.getLogger(__name__)
    logger.setLevel(level)
    formatter = logging.Formatter(log_format)
    ch = logging.StreamHandler()
    ch.setLevel(level)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    return logger
Wrapper around `os.path.join`.
Makes sure to join paths of the same type (bytes).
def path_join(*args):
    """
    Wrapper around `os.path.join`.

    Makes sure to join path pieces of the same (unicode) type.
    """
    return os.path.join(*map(paramiko.py3compat.u, args))
Parse a command line string and return username, password, remote hostname and remote path.
:param remote_url: A command line string.
:return: A tuple, containing username, password, remote hostname and remote path.
def parse_username_password_hostname(remote_url):
    """
    Split a remote URL into username, password, hostname and remote path.

    :param remote_url: a string like ``[user[:password]@]hostname:remote-path``.
    :return: tuple ``(username, password, hostname, remote_path)``; username
        and password are ``None`` when absent.
    """
    assert remote_url
    assert ':' in remote_url
    password = None
    if '@' in remote_url:
        credentials, host_part = remote_url.rsplit('@', 1)
    else:
        credentials, host_part = None, remote_url
    hostname, remote_path = host_part.split(':', 1)
    if credentials and ':' in credentials:
        username, password = credentials.split(':', 1)
    else:
        username = credentials
    assert hostname
    assert remote_path
    return username, password, hostname, remote_path
Ask the SSH agent for a list of keys, and return it.
:return: A reference to the SSH agent and a list of keys.
def get_ssh_agent_keys(logger):
    """
    Ask the SSH agent for a list of keys, and return it.

    :param logger: logger used to report agent problems.
    :return: tuple ``(agent, agent_keys)``; either element may be ``None``
        when no agent or no keys are available.
    """
    agent, agent_keys = None, None
    try:
        agent = paramiko.agent.Agent()
        _agent_keys = agent.get_keys()
        if not _agent_keys:
            agent.close()
            logger.error(
                "SSH agent didn't provide any valid key. Trying to continue..."
            )
        else:
            agent_keys = tuple(k for k in _agent_keys)
    except paramiko.SSHException:
        if agent:
            agent.close()
        agent = None
        logger.error("SSH agent speaks a non-compatible protocol. Ignoring it.")
    finally:
        # NOTE(review): returning from ``finally`` swallows any in-flight
        # exception other than SSHException — confirm this is intentional.
        return agent, agent_keys
Create the CLI argument parser.
def create_parser():
    """Build and return the CLI argument parser for the SFTP sync tool."""
    parser = argparse.ArgumentParser(
        description='Sync a local and a remote folder through SFTP.'
    )
    # (flags, keyword arguments) for every CLI argument, in display order
    argument_specs = (
        (("path",),
         {"type": str,
          "metavar": "local-path",
          "help": "the path of the local folder"}),
        (("remote",),
         {"type": str,
          "metavar": "user[:password]@hostname:remote-path",
          "help": "the ssh-url ([user[:password]@]hostname:remote-path) of the remote folder. "
                  "The hostname can be specified as a ssh_config's hostname too. "
                  "Every missing information will be gathered from there"}),
        (("-k", "--key"),
         {"metavar": "identity-path",
          "action": "append",
          "help": "private key identity path (defaults to ~/.ssh/id_rsa)"}),
        (("-l", "--logging"),
         {"choices": ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'],
          "default": 'ERROR',
          "help": "set logging level"}),
        (("-p", "--port"),
         {"default": 22,
          "type": int,
          "help": "SSH remote port (defaults to 22)"}),
        (("-f", "--fix-symlinks"),
         {"action": "store_true",
          "help": "fix symbolic links on remote side"}),
        (("-a", "--ssh-agent"),
         {"action": "store_true",
          "help": "enable ssh-agent support"}),
        (("-c", "--ssh-config"),
         {"metavar": "ssh_config path",
          "default": "~/.ssh/config",
          "type": str,
          "help": "path to the ssh-configuration file (default to ~/.ssh/config)"}),
        (("-n", "--known-hosts"),
         {"metavar": "known_hosts path",
          "default": "~/.ssh/known_hosts",
          "type": str,
          "help": "path to the openSSH known_hosts file"}),
        (("-d", "--disable-known-hosts"),
         {"action": "store_true",
          "help": "disable known_hosts fingerprint checking (security warning!)"}),
        (("-e", "--exclude-from"),
         {"metavar": "exclude-from-file-path",
          "type": str,
          "help": "exclude files matching pattern in exclude-from-file-path"}),
        (("-t", "--do-not-delete"),
         {"action": "store_true",
          "help": "do not delete remote files missing from local folder"}),
        (("-o", "--allow-unknown"),
         {"action": "store_true",
          "help": "allow connection to unknown hosts"}),
        (("-r", "--create-remote-directory"),
         {"action": "store_true",
          "help": "Create remote base directory if missing on remote"}),
    )
    for flags, options in argument_specs:
        parser.add_argument(*flags, **options)
    return parser
The main.
def main(args=None):
    """CLI entry point: parse the arguments and run the sync.

    :param args: argument list (``None`` lets argparse read ``sys.argv``).
    """
    parser = create_parser()
    args = vars(parser.parse_args(args))
    # map the CLI level names to the logging module's constants
    log_mapping = {
        'CRITICAL': logging.CRITICAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
        'NOTSET': logging.NOTSET,
    }
    log_level = log_mapping[args['logging']]
    del(args['logging'])
    global logger
    logger = configure_logging(log_level)
    # rename CLI argument names to SFTPClone constructor parameter names
    args_mapping = {
        "path": "local_path",
        "remote": "remote_url",
        "ssh_config": "ssh_config_path",
        "exclude_from": "exclude_file",
        "known_hosts": "known_hosts_path",
        "do_not_delete": "delete",
        "key": "identity_files",
    }
    kwargs = {  # convert the argument names to class constructor parameters
        args_mapping[k]: v
        for k, v in args.items()
        if v and k in args_mapping
    }
    # pass through every remaining truthy argument unchanged
    kwargs.update({
        k: v
        for k, v in args.items()
        if v and k not in args_mapping
    })
    # Special case: disable known_hosts check
    if args['disable_known_hosts']:
        kwargs['known_hosts_path'] = None
        del(kwargs['disable_known_hosts'])
    # Toggle `do_not_delete` flag
    if "delete" in kwargs:
        kwargs["delete"] = not kwargs["delete"]
    # Manually set the default identity file.
    kwargs["identity_files"] = kwargs.get("identity_files", None) or ["~/.ssh/id_rsa"]
    sync = SFTPClone(
        **kwargs
    )
    sync.run()
Return True if the remote correspondent of local_path has to be deleted,
i.e. if it doesn't exist locally or if it has a different type from the remote one.
def _must_be_deleted(local_path, r_st):
"""Return True if the remote correspondent of local_path has to be deleted.
i.e. if it doesn't exists locally or if it has a different type from the remote one."""
# if the file doesn't exists
if not os.path.lexists(local_path):
return True
# or if the file type is different
l_st = os.lstat(local_path)
if S_IFMT(r_st.st_mode) != S_IFMT(l_st.st_mode):
return True
return False |
Match mod, utime and uid/gid with locals one.
def _match_modes(self, remote_path, l_st):
    """Match mode, utime and (optionally) uid/gid of the remote node with the local ones.

    :param l_st: ``os.lstat`` result of the local counterpart.
    """
    self.sftp.chmod(remote_path, S_IMODE(l_st.st_mode))
    self.sftp.utime(remote_path, (l_st.st_atime, l_st.st_mtime))
    # chown only when explicitly enabled: it usually needs privileges remotely
    if self.chown:
        self.sftp.chown(remote_path, l_st.st_uid, l_st.st_gid)
Upload local_path to remote_path and set permission and mtime.
def file_upload(self, local_path, remote_path, l_st):
    """Upload local_path to remote_path, then replicate permissions and mtime.

    :param l_st: ``os.lstat`` result of ``local_path``.
    """
    self.sftp.put(local_path, remote_path)
    self._match_modes(remote_path, l_st)
Remove the remote directory node.
def remote_delete(self, remote_path, r_st):
    """Remove the remote node, recursing into directories.

    :param r_st: stat result of ``remote_path`` (used to tell dirs from files).
    """
    # If it's a directory, then delete content and directory
    if S_ISDIR(r_st.st_mode):
        for item in self.sftp.listdir_attr(remote_path):
            full_path = path_join(remote_path, item.filename)
            self.remote_delete(full_path, item)
        self.sftp.rmdir(remote_path)
    # Or simply delete files
    else:
        try:
            self.sftp.remove(remote_path)
        except FileNotFoundError as e:
            # the file may have been removed meanwhile: log and continue
            self.logger.error(
                "error while removing {}. trace: {}".format(remote_path, e)
            )
Traverse the entire remote_path tree.
Find files/directories that need to be deleted,
not being present in the local folder.
def check_for_deletion(self, relative_path=None):
    """Traverse the entire remote_path tree.

    Find files/directories that need to be deleted,
    not being present in the local folder.

    :param relative_path: subtree to scan, relative to the shared root
        (``None``/empty means the root itself).
    """
    if not relative_path:
        relative_path = str()  # root of shared directory tree
    remote_path = path_join(self.remote_path, relative_path)
    local_path = path_join(self.local_path, relative_path)
    for remote_st in self.sftp.listdir_attr(remote_path):
        r_lstat = self.sftp.lstat(path_join(remote_path, remote_st.filename))
        inner_remote_path = path_join(remote_path, remote_st.filename)
        inner_local_path = path_join(local_path, remote_st.filename)
        # check if remote_st is a symlink
        # otherwise could delete file outside shared directory
        if S_ISLNK(r_lstat.st_mode):
            if self._must_be_deleted(inner_local_path, r_lstat):
                self.remote_delete(inner_remote_path, r_lstat)
            continue
        if self._must_be_deleted(inner_local_path, remote_st):
            self.remote_delete(inner_remote_path, remote_st)
        elif S_ISDIR(remote_st.st_mode):
            # directory is kept: recurse into it
            self.check_for_deletion(
                path_join(relative_path, remote_st.filename)
            )
Create a new link pointing to link_destination in remote_path position.
def create_update_symlink(self, link_destination, remote_path):
    """Create a new link pointing to link_destination in remote_path position,
    replacing whatever was there before."""
    try:  # if there's anything, delete it
        self.sftp.remove(remote_path)
    except IOError:  # that's fine, nothing exists there!
        pass
    finally:  # and recreate the link
        try:
            self.sftp.symlink(link_destination, remote_path)
        except OSError as e:
            # Sometimes, if links are "too" different, symlink fails.
            # Sadly, nothing we can do about it.
            self.logger.error("error while symlinking {} to {}: {}".format(
                remote_path, link_destination, e))
Check if the given directory tree node has to be uploaded/created on the remote folder.
def node_check_for_upload_create(self, relative_path, f):
    """Check if the given directory tree node has to be uploaded/created on the remote folder.

    :param relative_path: the node's parent path, relative to the shared root.
    :param f: the node's name (file, directory or symlink).
    """
    if not relative_path:
        # we're at the root of the shared directory tree
        relative_path = str()
    # the (absolute) local address of f.
    local_path = path_join(self.local_path, relative_path, f)
    try:
        l_st = os.lstat(local_path)
    except OSError as e:
        """A little background here.
        Sometimes, in big clusters configurations (mail, etc.),
        files could disappear or be moved, suddenly.
        There's nothing to do about it,
        system should be stopped before doing backups.
        Anyway, we log it, and skip it.
        """
        self.logger.error("error while checking {}: {}".format(relative_path, e))
        return
    if local_path in self.exclude_list:
        self.logger.info("Skipping excluded file %s.", local_path)
        return
    # the (absolute) remote address of f.
    remote_path = path_join(self.remote_path, relative_path, f)
    # First case: f is a directory
    if S_ISDIR(l_st.st_mode):
        # we check if the folder exists on the remote side
        # it has to be a folder, otherwise it would have already been
        # deleted
        try:
            self.sftp.stat(remote_path)
        except IOError:  # it doesn't exist yet on remote side
            self.sftp.mkdir(remote_path)
        self._match_modes(remote_path, l_st)
        # now, we should traverse f too (recursion magic!)
        self.check_for_upload_create(path_join(relative_path, f))
    # Second case: f is a symbolic link
    elif S_ISLNK(l_st.st_mode):
        # read the local link
        local_link = os.readlink(local_path)
        absolute_local_link = os.path.realpath(local_link)
        # is it absolute?
        is_absolute = local_link.startswith("/")
        # and does it point inside the shared directory?
        # add trailing slash (security)
        trailing_local_path = path_join(self.local_path, '')
        relpath = os.path.commonprefix(
            [absolute_local_link,
             trailing_local_path]
        ) == trailing_local_path
        if relpath:
            relative_link = absolute_local_link[len(trailing_local_path):]
        else:
            relative_link = None
        """
        # Refactor them all, be efficient!
        # Case A: absolute link pointing outside shared directory
        # (we can only update the remote part)
        if is_absolute and not relpath:
            self.create_update_symlink(local_link, remote_path)
        # Case B: absolute link pointing inside shared directory
        # (we can leave it as it is or fix the prefix to match the one of the remote server)
        elif is_absolute and relpath:
            if self.fix_symlinks:
                self.create_update_symlink(
                    join(
                        self.remote_path,
                        relative_link,
                    ),
                    remote_path
                )
            else:
                self.create_update_symlink(local_link, remote_path)
        # Case C: relative link pointing outside shared directory
        # (all we can do is try to make the link anyway)
        elif not is_absolute and not relpath:
            self.create_update_symlink(local_link, remote_path)
        # Case D: relative link pointing inside shared directory
        # (we preserve the relativity and link it!)
        elif not is_absolute and relpath:
            self.create_update_symlink(local_link, remote_path)
        """
        # NOTE(review): only the "absolute link inside the shared dir" case
        # is handled here; the commented block above covers the other three
        # cases but is disabled — confirm relative links are meant to be
        # skipped entirely.
        if is_absolute and relpath:
            if self.fix_symlinks:
                self.create_update_symlink(
                    path_join(
                        self.remote_path,
                        relative_link,
                    ),
                    remote_path
                )
            else:
                self.create_update_symlink(local_link, remote_path)
    # Third case: regular file
    elif S_ISREG(l_st.st_mode):
        try:
            r_st = self.sftp.lstat(remote_path)
            if self._file_need_upload(l_st, r_st):
                self.file_upload(local_path, remote_path, l_st)
        except IOError as e:
            # ENOENT: the file doesn't exist remotely yet, upload it
            if e.errno == errno.ENOENT:
                self.file_upload(local_path, remote_path, l_st)
    # Anything else.
    else:
        self.logger.warning("Skipping unsupported file %s.", local_path)
Traverse the relative_path tree and check for files that need to be uploaded/created.
Relativity here refers to the shared directory tree.
def check_for_upload_create(self, relative_path=None):
    """Traverse the relative_path tree and check for files that need to be
    uploaded/created. Relativity here refers to the shared directory tree."""
    if relative_path:
        local_folder = path_join(self.local_path, relative_path)
    else:
        local_folder = self.local_path
    for entry in os.listdir(local_folder):
        self.node_check_for_upload_create(relative_path, entry)
Run the sync.
Confront the local and the remote directories and perform the needed changes.
def run(self):
    """Run the sync.

    Confront the local and the remote directories and perform the needed changes.
    Exits the process (status 1) on unrecoverable remote-folder errors.
    """
    # Check if remote path is present
    try:
        self.sftp.stat(self.remote_path)
    except FileNotFoundError as e:
        if self.create_remote_directory:
            self.sftp.mkdir(self.remote_path)
            self.logger.info(
                "Created missing remote dir: '" + self.remote_path + "'")
        else:
            self.logger.error(
                "Remote folder does not exists. "
                "Add '-r' to create it if missing.")
            sys.exit(1)
    try:
        if self.delete:
            # First check for items to be removed
            self.check_for_deletion()
        # Now scan local for items to upload/create
        self.check_for_upload_create()
    except FileNotFoundError:
        # If this happens, probably the remote folder doesn't exist.
        self.logger.error(
            "Error while opening remote folder. Are you sure it does exist?")
        sys.exit(1)
tree unix command replacement.
def list_files(start_path):
    """Return a string rendering of the directory tree rooted at *start_path*
    (a small replacement for the unix ``tree`` command)."""
    chunks = [u'\n']
    for root, dirs, files in os.walk(start_path):
        depth = root.replace(start_path, '').count(os.sep)
        chunks.append(u'{}{}/\n'.format(' ' * 4 * depth, os.path.basename(root)))
        file_indent = ' ' * 4 * (depth + 1)
        chunks.extend(u'{}{}\n'.format(file_indent, name) for name in files)
    return u''.join(chunks)
Create a nested dictionary that represents the folder structure of `start_path`.
Liberally adapted from
http://code.activestate.com/recipes/577879-create-a-nested-dictionary-from-oswalk/
def file_tree(start_path):
    """
    Create a nested dictionary that represents the folder structure of `start_path`.

    Liberally adapted from
    http://code.activestate.com/recipes/577879-create-a-nested-dictionary-from-oswalk/
    """
    tree = {}
    root_dir = start_path.rstrip(os.sep)
    # strip everything up to (and including) root_dir's parent
    prefix_len = root_dir.rfind(os.sep) + 1
    for current, _dirs, files in os.walk(root_dir):
        parts = current[prefix_len:].split(os.sep)
        # walk is top-down, so every ancestor node already exists
        node = tree
        for folder in parts[:-1]:
            node = node[folder]
        node[parts[-1]] = dict.fromkeys(files)
    return tree
Capture standard output and error.
def capture_sys_output():
    """Capture standard output and error, yielding the two StringIO buffers."""
    out_buffer, err_buffer = StringIO(), StringIO()
    saved_out, saved_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = out_buffer, err_buffer
        yield out_buffer, err_buffer
    finally:
        # always restore the real streams, even when the body raised
        sys.stdout, sys.stderr = saved_out, saved_err
Suppress logging.
def suppress_logging(log_level=logging.CRITICAL):
    """Disable logging up to *log_level* for the duration of the context.

    :param log_level: highest level to silence (defaults to everything).
    """
    logging.disable(log_level)
    try:
        yield
    finally:
        # BUG FIX: without try/finally, an exception in the with-body left
        # logging permanently disabled.
        logging.disable(logging.NOTSET)
Override user environmental variables with custom one.
def override_env_variables():
    """Override user-related environment variables with "test", restoring them on exit.

    Variables that did not exist beforehand are removed again on exit
    (the original left them set to "test").
    """
    env_vars = ("LOGNAME", "USER", "LNAME", "USERNAME")
    saved = {v: os.environ.get(v) for v in env_vars}
    for v in env_vars:
        os.environ[v] = "test"
    try:
        yield
    finally:
        for v, old in saved.items():
            if old is None:
                # BUG FIX: previously-unset variables were leaked as "test"
                os.environ.pop(v, None)
            else:
                os.environ[v] = old
Override the `$SSH_AUTH_SOCK `env variable to mock the absence of an SSH agent.
def override_ssh_auth_env():
    """Remove `$SSH_AUTH_SOCK` from the environment to mock the absence of an SSH agent,
    restoring the previous value (if any) on exit."""
    ssh_auth_sock = "SSH_AUTH_SOCK"
    # BUG FIX: `del os.environ[...]` raised KeyError when the variable was
    # not set; pop() with a default is safe either way.
    old_ssh_auth_sock = os.environ.pop(ssh_auth_sock, None)
    try:
        yield
    finally:
        if old_ssh_auth_sock is not None:
            os.environ[ssh_auth_sock] = old_ssh_auth_sock
Get the configurations from .tldrrc and return it as a dict.
def get_config():
    """Load the configuration from ``.tldrrc`` and return it as a dict.

    Exits the process with a message when the file is missing, is invalid
    YAML, uses unsupported colors, or points to a missing repo directory.
    """
    config_path = path.join(
        (os.environ.get('TLDR_CONFIG_DIR') or path.expanduser('~')),
        '.tldrrc')
    if not path.exists(config_path):
        sys.exit("Can't find config file at: {0}. You may use `tldr init` "
                 "to init the config file.".format(config_path))
    with io.open(config_path, encoding='utf-8') as f:
        try:
            config = yaml.safe_load(f)
        except yaml.scanner.ScannerError:
            sys.exit("The config file is not a valid YAML file.")
    supported_colors = ['black', 'red', 'green', 'yellow', 'blue',
                        'magenta', 'cyan', 'white']
    # every configured color must be a supported ANSI color name
    if not set(config['colors'].values()).issubset(set(supported_colors)):
        sys.exit("Unsupported colors in config file: {0}.".format(
            ', '.join(set(config['colors'].values()) - set(supported_colors))))
    if not path.exists(config['repo_directory']):
        sys.exit("Can't find the tldr repo, check the `repo_directory` "
                 "setting in config file.")
    return config
Parse the man page and return the parsed lines.
def parse_man_page(command, platform):
    """Locate the man page for *command* and return its parsed, colorized lines."""
    location = find_page_location(command, platform)
    return parse_page(location)
Find the command man page in the pages directory.
def find_page_location(command, specified_platform):
    """Find the command's man page path inside the pages directory.

    :param specified_platform: platform override; falls back to the
        configured default platform, then to the shared 'common' pages.
    Exits the process when the command or the platform is unsupported.
    """
    repo_directory = get_config()['repo_directory']
    default_platform = get_config()['platform']
    command_platform = (
        specified_platform if specified_platform else default_platform)
    with io.open(path.join(repo_directory, 'pages/index.json'),
                 encoding='utf-8') as f:
        index = json.load(f)
    command_list = [item['name'] for item in index['commands']]
    if command not in command_list:
        sys.exit(
            ("Sorry, we don't support command: {0} right now.\n"
             "You can file an issue or send a PR on github:\n"
             "    https://github.com/tldr-pages/tldr").format(command))
    supported_platforms = index['commands'][
        command_list.index(command)]['platform']
    # prefer the requested platform, then the shared 'common' pages
    if command_platform in supported_platforms:
        platform = command_platform
    elif 'common' in supported_platforms:
        platform = 'common'
    else:
        platform = ''
    if not platform:
        sys.exit(
            ("Sorry, command {0} is not supported on your platform.\n"
             "You can file an issue or send a PR on github:\n"
             "    https://github.com/tldr-pages/tldr").format(command))
    page_path = path.join(path.join(repo_directory, 'pages'),
                          path.join(platform, command + '.md'))
    return page_path
Find the command usage.
def find(command, on):
    """Print the command usage (its tldr page) to stdout.

    :param on: platform to look the page up for.
    """
    output_lines = parse_man_page(command, on)
    click.echo(''.join(output_lines))
Update to the latest pages.
def update():
    """Update the local tldr pages repo to the latest upstream and rebuild the index."""
    repo_directory = get_config()['repo_directory']
    os.chdir(repo_directory)
    click.echo("Check for updates...")
    # compare the local master commit with the upstream HEAD
    local = subprocess.check_output('git rev-parse master'.split()).strip()
    remote = subprocess.check_output(
        'git ls-remote https://github.com/tldr-pages/tldr/ HEAD'.split()
    ).split()[0]
    if local != remote:
        click.echo("Updating...")
        subprocess.check_call('git checkout master'.split())
        subprocess.check_call('git pull --rebase'.split())
        build_index()
        click.echo("Update to the latest and rebuild the index.")
    else:
        click.echo("No need for updates.")
Init config file.
def init():
    """Interactively create the ``.tldrrc`` config file if it doesn't exist yet."""
    default_config_path = path.join(
        (os.environ.get('TLDR_CONFIG_DIR') or path.expanduser('~')),
        '.tldrrc')
    if path.exists(default_config_path):
        click.echo("There is already a config file exists, "
                   "skip initializing it.")
    else:
        # ask for the repo location and target platform, validating both
        repo_path = click.prompt("Input the tldr repo path(absolute path)")
        if not path.exists(repo_path):
            sys.exit("Repo path not exist, clone it first.")
        platform = click.prompt("Input your platform(linux, osx or sunos)")
        if platform not in ['linux', 'osx', 'sunos']:
            sys.exit("Platform should be in linux, osx or sunos.")
        # default color scheme for the three line categories
        colors = {
            "description": "blue",
            "usage": "green",
            "command": "cyan"
        }
        config = {
            "repo_directory": repo_path,
            "colors": colors,
            "platform": platform
        }
        with open(default_config_path, 'w') as f:
            f.write(yaml.safe_dump(config, default_flow_style=False))
        click.echo("Initializing the config file at {0}".format(
            default_config_path))
Locate the command's man page.
def locate(command, on):
    """Print the path of the command's man page.

    :param on: platform to look the page up for.
    """
    location = find_page_location(command, on)
    click.echo(location)
Produce a relationship between this mapped table and another
one.
This makes usage of SQLAlchemy's :func:`sqlalchemy.orm.relationship`
construct.
def relate(cls, propname, *args, **kwargs):
    """Produce a relationship between this mapped table and another
    one.

    This makes usage of SQLAlchemy's :func:`sqlalchemy.orm.relationship`
    construct.

    :param propname: attribute name the relationship is installed under.
    """
    # install the relationship on the existing mapper configuration
    class_mapper(cls)._configure_property(propname, relationship(*args, **kwargs))
Execute a SQL statement.
The statement may be a string SQL string,
an :func:`sqlalchemy.sql.expression.select` construct, or a
:func:`sqlalchemy.sql.expression.text`
construct.
def execute(self, stmt, **params):
    """Execute a SQL statement.

    The statement may be a string SQL string,
    an :func:`sqlalchemy.sql.expression.select` construct, or a
    :func:`sqlalchemy.sql.expression.text`
    construct.

    :param params: bind parameters forwarded to ``session.execute``.
    """
    # NOTE(review): ``sql.text(..., bind=...)`` is a legacy SQLAlchemy
    # signature (removed in 1.4+) — confirm the pinned version supports it.
    return self.session.execute(sql.text(stmt, bind=self.bind), **params)
Configure a mapping to the given attrname.
This is the "master" method that can be used to create any
configuration.
:param attrname: String attribute name which will be
established as an attribute on this :class:.`.SQLSoup`
instance.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
:param tablename: String name of a :class:`.Table` to be
reflected. If a :class:`.Table` is already available,
use the ``selectable`` argument. This argument is
mutually exclusive versus the ``selectable`` argument.
:param selectable: a :class:`.Table`, :class:`.Join`, or
:class:`.Select` object which will be mapped. This
argument is mutually exclusive versus the ``tablename``
argument.
:param schema: String schema name to use if the
``tablename`` argument is present.
def map_to(self, attrname, tablename=None, selectable=None,
           schema=None, base=None, mapper_args=util.immutabledict()):
    """Configure a mapping to the given attrname.

    This is the "master" method that can be used to create any
    configuration.

    :param attrname: String attribute name which will be
        established as an attribute on this :class:.`.SQLSoup`
        instance.
    :param base: a Python class which will be used as the
        base for the mapped class. If ``None``, the "base"
        argument specified by this :class:`.SQLSoup`
        instance's constructor will be used, which defaults to
        ``object``.
    :param mapper_args: Dictionary of arguments which will
        be passed directly to :func:`.orm.mapper`.
    :param tablename: String name of a :class:`.Table` to be
        reflected. If a :class:`.Table` is already available,
        use the ``selectable`` argument. This argument is
        mutually exclusive versus the ``selectable`` argument.
    :param selectable: a :class:`.Table`, :class:`.Join`, or
        :class:`.Select` object which will be mapped. This
        argument is mutually exclusive versus the ``tablename``
        argument.
    :param schema: String schema name to use if the
        ``tablename`` argument is present.
    """
    # refuse to map the same attribute twice
    if attrname in self._cache:
        raise SQLSoupError(
            "Attribute '%s' is already mapped to '%s'" % (
                attrname,
                class_mapper(self._cache[attrname]).mapped_table
            ))
    if tablename is not None:
        # NOTE(review): `basestring` is Python 2 only — this module predates py3
        if not isinstance(tablename, basestring):
            raise ArgumentError("'tablename' argument must be a string."
                                )
        if selectable is not None:
            raise ArgumentError("'tablename' and 'selectable' "
                                "arguments are mutually exclusive")
        # reflect the table from the database
        selectable = Table(tablename,
                           self._metadata,
                           autoload=True,
                           autoload_with=self.bind,
                           schema=schema or self.schema)
    elif schema:
        raise ArgumentError("'tablename' argument is required when "
                            "using 'schema'.")
    elif selectable is not None:
        if not isinstance(selectable, expression.FromClause):
            raise ArgumentError("'selectable' argument must be a "
                                "table, select, join, or other "
                                "selectable construct.")
    else:
        raise ArgumentError("'tablename' or 'selectable' argument is "
                            "required.")
    # a primary key (real or supplied via mapper_args) is mandatory to map
    if not selectable.primary_key.columns and not \
            'primary_key' in mapper_args:
        if tablename:
            raise SQLSoupError(
                "table '%s' does not have a primary "
                "key defined" % tablename)
        else:
            raise SQLSoupError(
                "selectable '%s' does not have a primary "
                "key defined" % selectable)
    mapped_cls = _class_for_table(
        self.session,
        self.engine,
        selectable,
        base or self.base,
        mapper_args
    )
    self._cache[attrname] = mapped_cls
    return mapped_cls
Map a selectable directly.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
:param selectable: an :func:`.expression.select` construct.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
def map(self, selectable, base=None, **mapper_args):
    """Map a selectable directly.

    The class and its mapping are not cached and will
    be discarded once dereferenced (as of 0.6.6).

    :param selectable: an :func:`.expression.select` construct.
    :param base: a Python class used as the base of the mapped class;
      when ``None``, this instance's ``base`` (default ``object``) is used.
    :param mapper_args: keyword arguments passed directly to
      :func:`.orm.mapper`.
    """
    base_cls = base or self.base
    return _class_for_table(
        self.session, self.engine, selectable, base_cls, mapper_args)
Map a selectable directly, wrapping the
selectable in a subquery with labels.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
:param selectable: an :func:`.expression.select` construct.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
def with_labels(self, selectable, base=None, **mapper_args):
    """Map a selectable directly, wrapping the
    selectable in a subquery with labels.

    The class and its mapping are not cached and will
    be discarded once dereferenced (as of 0.6.6).

    :param selectable: an :func:`.expression.select` construct.
    :param base: a Python class used as the base of the mapped class;
      when ``None``, this instance's ``base`` (default ``object``) is used.
    :param mapper_args: keyword arguments passed directly to
      :func:`.orm.mapper`.
    """
    # TODO give meaningful aliases
    labeled = expression._clause_element_as_expr(selectable)
    labeled = labeled.select(use_labels=True).alias('foo')
    return self.map(labeled, base=base, **mapper_args)
Create an :func:`.expression.join` and map to it.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
:param left: a mapped class or table object.
:param right: a mapped class or table object.
:param onclause: optional "ON" clause construct..
:param isouter: if True, the join will be an OUTER join.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
def join(self, left, right, onclause=None, isouter=False,
         base=None, **mapper_args):
    """Create an :func:`.expression.join` and map to it.

    The class and its mapping are not cached and will
    be discarded once dereferenced (as of 0.6.6).

    :param left: a mapped class or table object.
    :param right: a mapped class or table object.
    :param onclause: optional "ON" clause construct.
    :param isouter: if True, the join will be an OUTER join.
    :param base: a Python class used as the base of the mapped class;
      when ``None``, this instance's ``base`` (default ``object``) is used.
    :param mapper_args: keyword arguments passed directly to
      :func:`.orm.mapper`.
    """
    # the module-level sqlalchemy `join` (the method name does not shadow
    # it inside the body)
    joined = join(left, right, onclause=onclause, isouter=isouter)
    return self.map(joined, base=base, **mapper_args)
Return the named entity from this :class:`.SQLSoup`, or
create if not present.
For more generalized mapping, see :meth:`.map_to`.
def entity(self, attr, schema=None):
    """Return the named entity from this :class:`.SQLSoup`, or
    create if not present.

    For more generalized mapping, see :meth:`.map_to`.

    :param attr: attribute name (and table name) to look up.
    :param schema: optional schema name, used only when the entity must
      be newly reflected via :meth:`.map_to`.
    """
    try:
        return self._cache[attr]
    # BUGFIX: was the Python-2-only `except KeyError, ke:`; the bound
    # name was never used, so the plain form works on both 2 and 3.
    except KeyError:
        return self.map_to(attr, tablename=attr, schema=schema)
\
Distance between 2 features. The integer result is always positive or zero.
If the features overlap or touch, it is zero.
>>> from intersecter import Feature, distance
>>> distance(Feature(1, 2), Feature(12, 13))
10
>>> distance(Feature(1, 2), Feature(2, 3))
0
>>> distance(Feature(1, 100), Feature(20, 30))
0
def distance(f1, f2):
    """\
    Distance between 2 features. The integer result is always positive or
    zero; if the features overlap or touch, it is zero.

    >>> from intersecter import Feature, distance
    >>> distance(Feature(1, 2), Feature(12, 13))
    10
    >>> distance(Feature(1, 2), Feature(2, 3))
    0
    >>> distance(Feature(1, 100), Feature(20, 30))
    0
    """
    # gap when f1 lies entirely left of f2
    gap = f2.start - f1.end
    if gap > 0:
        return gap
    # gap when f2 lies entirely left of f1
    gap = f1.start - f2.end
    if gap > 0:
        return gap
    return 0
Return a object of all stored intervals intersecting between (start, end) inclusive.
def find(self, start, end, chrom=None):
    """Return a list of all stored intervals intersecting between
    (start, end) inclusive.

    :param start: query start coordinate.
    :param end: query end coordinate.
    :param chrom: chromosome name; unknown chromosomes yield [].
    """
    # BUGFIX: this guard previously ran AFTER self.intervals[chrom],
    # which it is meant to protect (a plain-dict lookup on an unseen
    # chrom would raise KeyError before the guard was reached).
    if chrom not in self.max_len:
        return []
    intervals = self.intervals[chrom]
    ilen = len(intervals)
    # NOTE: we only search for starts, since any feature that starts within
    # max_len of the query could overlap, we must subtract max_len from the
    # start to get the needed search space. everything else proceeds like a
    # binary search. (but add distance calc for candidates).
    ileft = binsearch_left_start(intervals, start - self.max_len[chrom], 0, ilen)
    iright = binsearch_right_end(intervals, end, ileft, ilen)
    query = Feature(start, end)
    # we have to check the distance to make sure we didnt pick up anything
    # that started within max_len, but wasnt as long as max_len
    return [f for f in intervals[ileft:iright] if distance(f, query) == 0]
return the nearest n features strictly to the left of a Feature f.
Overlapping features are not considered as to the left.
f: a Feature object
n: the number of features to return
def left(self, f, n=1):
    """return the nearest n features strictly to the left of a Feature f.
    Overlapping features are not considered as to the left.

    f: a Feature object
    n: the number of features to return
    """
    intervals = self.intervals[f.chrom]
    if intervals == []: return []
    # rightmost candidate: one past the first interval starting at/after f.start
    iright = binsearch_left_start(intervals, f.start, 0 , len(intervals)) + 1
    # NOTE(review): the (lo=0, hi=0) bounds make this search return 0
    # unconditionally, so the slice below always scans from the beginning.
    # Looks like the intended max_len-based narrowing is disabled -- confirm
    # against binsearch_left_start before "fixing".
    ileft = binsearch_left_start(intervals, f.start - self.max_len[f.chrom] - 1, 0, 0)
    # candidates strictly left of f (end < f.start, non-overlapping),
    # sorted by their distance to f
    results = sorted((distance(other, f), other) for other in intervals[ileft:iright] if other.end < f.start and distance(f, other) != 0)
    if len(results) == n:
        return [r[1] for r in results]
    # have to do some extra work here since intervals are sorted
    # by starts, and we dont know which end may be around...
    # in this case, we got some extras, just return as many as
    # needed once we see a gap in distances.
    for i in range(n, len(results)):
        if results[i - 1][0] != results[i][0]:
            return [r[1] for r in results[:i]]
    if ileft == 0:
        return [r[1] for r in results]
    # here, didn't get enough, so move left and try again.
    # NOTE(review): unimplemented path -- deliberately raises
    # ZeroDivisionError as a stub. Unreachable while ileft is always 0
    # (see the (0, 0) bounds above).
    1/0
return the nearest n features strictly to the right of a Feature f.
Overlapping features are not considered as to the right.
f: a Feature object
n: the number of features to return
def right(self, f, n=1):
    """return the nearest n features strictly to the right of a Feature f.
    Overlapping features are not considered as to the right.

    f: a Feature object
    n: the number of features to return
    """
    intervals = self.intervals[f.chrom]
    ilen = len(intervals)
    # start scanning at the first interval whose end is past f.end;
    # earlier intervals cannot be strictly to the right.
    iright = binsearch_right_end(intervals, f.end, 0, ilen)
    results = []
    while iright < ilen:
        i = len(results)
        # once we have collected more than n hits, stop at the first gap in
        # distances so that all ties at the n-th distance are included.
        if i > n:
            if distance(f, results[i - 1]) != distance(f, results[i - 2]):
                return results[:i - 1]
        other = intervals[iright]
        iright += 1
        # distance 0 means overlapping/touching -> not "strictly right"
        if distance(other, f) == 0: continue
        results.append(other)
    return results
find n upstream features where upstream is determined by
the strand of the query Feature f
Overlapping features are not considered.
f: a Feature object
n: the number of features to return
def upstream(self, f, n=1):
    """find n upstream features where upstream is determined by
    the strand of the query Feature f
    Overlapping features are not considered.

    f: a Feature object
    n: the number of features to return
    """
    # on the minus strand, "upstream" lies to the right in genome coordinates
    return self.right(f, n) if f.strand == -1 else self.left(f, n)
find n downstream features where downstream is determined by
the strand of the query Feature f
Overlapping features are not considered.
f: a Feature object
n: the number of features to return
def downstream(self, f, n=1):
    """find n downstream features where downstream is determined by
    the strand of the query Feature f
    Overlapping features are not considered.

    f: a Feature object
    n: the number of features to return
    """
    # on the minus strand, "downstream" lies to the left in genome coordinates
    return self.left(f, n) if f.strand == -1 else self.right(f, n)
return the n nearest neighbors to the given feature
f: a Feature object
k: the number of features to return
def knearest(self, f_or_start, end=None, chrom=None, k=1):
    """return the n nearest neighbors to the given feature

    f: a Feature object
    k: the number of features to return
    """
    # accept either a ready-made feature or raw (start, end, chrom) coords
    if end is None:
        query = f_or_start
    else:
        query = Feature(f_or_start, end, chrom=chrom)
    DIST = 2000
    hits = filter_feats(
        self.find(query.start - DIST, query.end + DIST, chrom=query.chrom),
        query, k)
    if len(hits) >= k:
        return hits
    # not enough within +/- DIST: extend the search on both flanks
    needed = k - len(hits)
    flank_left = Feature(query.start - DIST, query.start, chrom=query.chrom)
    hits.extend(self.left(flank_left, n=needed))
    flank_right = Feature(query.end, query.end + DIST, chrom=query.chrom)
    hits.extend(self.right(flank_right, n=needed))
    return filter_feats(hits, query, k)
find all elements between (or overlapping) start and end
def find(self, start, end):
    """find all elements between (or overlapping) start and end"""
    hits = []
    # this node's own intervals are sorted by start; skip the scan when
    # the query ends before the first one begins
    if self.intervals and end >= self.intervals[0].start:
        for iv in self.intervals:
            if iv.end >= start and iv.start <= end:
                hits.append(iv)
    # recurse into children that can contain overlapping intervals
    if self.left and start <= self.center:
        hits += self.left.find(start, end)
    if self.right and end >= self.center:
        hits += self.right.find(start, end)
    return hits
return the sequence for a region using the UCSC DAS
server. note the start is 1-based
each feature will have it's own .sequence method which sends
the correct start and end to this function.
>>> sequence('hg18', 'chr2', 2223, 2230)
'caacttag'
def sequence(db, chrom, start, end):
    """
    return the sequence for a region using the UCSC DAS
    server. note the start is 1-based
    each feature will have it's own .sequence method which sends
    the correct start and end to this function.

    >>> sequence('hg18', 'chr2', 2223, 2230)
    'caacttag'
    """
    base = "http://genome.ucsc.edu/cgi-bin/das/%s" % db
    segment = "/dna?segment=%s:%i,%i" % (chrom, start, end)
    # fetch the DAS XML payload and pull the sequence text out of it
    xml = U.urlopen(base + segment).read()
    return _seq_from_xml(xml)
alter the table to work between different
dialects
def set_table(genome, table, table_name, connection_string, metadata):
    """
    alter the table to work between different
    dialects

    Re-reflects `table_name` from `genome`'s bind and rewrites index
    names and column types so the schema can be recreated on another
    backend (sqlite / postgres / mysql).

    NOTE(review): the incoming `table` and `metadata` arguments are never
    read -- `table` is immediately rebound by the reflection below.
    """
    # reflect a fresh copy of the table from the source database
    table = Table(table_name, genome._metadata, autoload=True,
                autoload_with=genome.bind, extend_existing=True)
    #print "\t".join([c.name for c in table.columns])
    # need to prefix the indexes with the table name to avoid collisions
    for i, idx in enumerate(table.indexes):
        idx.name = table_name + "." + idx.name + "_ix" + str(i)
    cols = []
    for i, col in enumerate(table.columns):
        # convert mysql-specific types to varchar
        #print col.name, col.type, isinstance(col.type, ENUM)
        if isinstance(col.type, (LONGBLOB, ENUM)):
            if 'sqlite' in connection_string:
                col.type = VARCHAR()
            elif 'postgres' in connection_string:
                if isinstance(col.type, ENUM):
                    #print dir(col)
                    # postgres has native enums; rebuild with the same values
                    col.type = PG_ENUM(*col.type.enums, name=col.name,
                            create_type=True)
                else:
                    col.type = VARCHAR()
        elif str(col.type) == "VARCHAR" \
                and ("mysql" in connection_string \
                or "postgres" in connection_string):
            # mysql/postgres require an explicit length on VARCHAR
            # ("description" is deliberately left unbounded)
            if col.type.length is None:
                col.type.length = 48 if col.name != "description" else None
        if not "mysql" in connection_string:
            # mysql SET(...) has no portable equivalent; store as varchar
            if str(col.type).lower().startswith("set("):
                col.type = VARCHAR(15)
        cols.append(col)
    # rebuild the Table with the adjusted column definitions
    table = Table(table_name, genome._metadata, *cols,
            autoload_replace=True, extend_existing=True)
    return table
internal: create a dburl from a set of parameters or the defaults on
this object
def create_url(self, db="", user="genome", host="genome-mysql.cse.ucsc.edu",
        password="", dialect="mysqldb"):
    """
    internal: create a dburl from a set of parameters or the defaults on
    this object

    Side effects only: sets self.db / self.host / self.user /
    self.password and the final self.dburl; returns nothing.
    """
    # an existing path on disk is treated as an sqlite database file
    if os.path.exists(db):
        db = "sqlite:///" + db
    # Is this a DB URL? If so, use it directly
    if self.db_regex.match(db):
        self.db = self.url = db
        self.dburl = db
        self.user = self.host = self.password = ""
    else:
        self.db = db
        # default user "genome" only makes sense on the UCSC host; for any
        # other host fall back to the local login name
        if user == "genome" and host != "genome-mysql.cse.ucsc.edu":
            import getpass
            user = getpass.getuser()
        self.host = host
        self.user = user
        # pre-format the password as the ":pass" URL fragment (or empty)
        self.password = (":" + password) if password else ""
        # NOTE(review): self.url is presumably a format template such as
        # "{dialect}://{user}{password}@{host}/{db}" -- confirm at the
        # attribute's definition site.
        self.dburl = self.url.format(db=self.db, user=self.user,
            host=self.host, password=self.password, dialect=dialect)
mirror a set of `tables` to `dest_url`
Returns a new Genome object
Parameters
----------
tables : list
an iterable of tables
dest_url: str
a dburl string, e.g. 'sqlite:///local.db'
def mirror(self, tables, dest_url):
    """
    mirror a set of `tables` to `dest_url`

    Returns a new Genome object

    Parameters
    ----------

    tables : list
        an iterable of tables

    dest_url: str
        a dburl string, e.g. 'sqlite:///local.db'
    """
    # BUGFIX: explicit relative import -- the implicit form
    # (`from mirror import mirror`) is Python-2-only and inconsistent
    # with `from .annotate import annotate` used elsewhere in this module.
    from .mirror import mirror
    return mirror(self, tables, dest_url)
create a pandas dataframe from a table or query
Parameters
----------
table : table
a table in this database or a query
limit: integer
an integer limit on the query
offset: integer
an offset for the query
def dataframe(self, table):
    """
    create a pandas dataframe from a table or query

    Parameters
    ----------

    table : table, query, or str
        a table in this database, a query, or the string name of a table
        on this instance

    (The previous docstring advertised `limit`/`offset` parameters that
    this method does not accept.)
    """
    from pandas import DataFrame
    # a string names an attribute (mapped table) on this instance
    if isinstance(table, six.string_types):
        table = getattr(self, table)
    # a Query exposes .first(); a plain sequence of rows does not
    try:
        rec = table.first()
    except AttributeError:
        rec = table[0]
    if hasattr(table, "all"):
        records = table.all()
    else:
        records = [tuple(t) for t in table]
    # column names come from the underlying table of a sample record
    cols = [c.name for c in rec._table.columns]
    return DataFrame.from_records(records, columns=cols)
use some of the machinery in pandas to load a file into a table
Parameters
----------
fname : str
filename or filehandle to load
table : str
table to load the file to
sep : str
CSV separator
bins : bool
add a "bin" column for efficient spatial queries.
indexes : list[str]
list of columns to index
def load_file(self, fname, table=None, sep="\t", bins=False, indexes=None):
    """
    use some of the machinery in pandas to load a file into a table

    Parameters
    ----------

    fname : str
        filename or filehandle to load

    table : str
        table to load the file to

    sep : str
        CSV separator

    bins : bool
        add a "bin" column for efficient spatial queries.

    indexes : list[str]
        list of columns to index
    """
    # normalize common column-name variants to the UCSC-style names
    convs = {"#chr": "chrom", "start": "txStart", "end": "txEnd", "chr":
            "chrom", "pos": "start", "POS": "start", "chromStart": "txStart",
            "chromEnd": "txEnd"}
    if table is None:
        import os.path as op
        table = op.basename(op.splitext(fname)[0]).replace(".", "_")
        print("writing to:", table, file=sys.stderr)
    from pandas.io import sql
    import pandas as pa
    from toolshed import nopen
    needs_name = False
    # stream the file in 100k-row chunks so large files fit in memory
    for i, chunk in enumerate(pa.read_csv(nopen(fname), iterator=True,
                                        chunksize=100000, sep=sep, encoding="latin-1")):
        chunk.columns = [convs.get(k, k) for k in chunk.columns]
        if "name" not in chunk.columns:
            needs_name = True
            chunk['name'] = chunk.get('chrom', chunk[chunk.columns[0]])
        if bins:
            chunk['bin'] = 1
        if i == 0 and table not in self.tables:
            flavor = self.url.split(":")[0]
            schema = sql.get_schema(chunk, table, flavor)
            print(schema)
            self.engine.execute(schema)
        elif i == 0:
            # BUGFIX: was a Python-2 `print >>sys.stderr` statement, a
            # syntax error alongside the print() calls used above.
            print("adding to existing table, you may want to drop first",
                  file=sys.stderr)
        tbl = getattr(self, table)._table
        cols = chunk.columns
        data = list(dict(zip(cols, x)) for x in chunk.values)
        if needs_name:
            # synthesize a name column as "chrom:start"
            for d in data:
                d['name'] = "%s:%s" % (d.get("chrom"), d.get("txStart", d.get("chromStart")))
        if bins:
            for d in data:
                d['bin'] = max(Genome.bins(int(d["txStart"]), int(d["txEnd"])))
        self.engine.execute(tbl.insert(), data)
        self.session.commit()
        if i > 0:
            # BUGFIX: was a Python-2 `print >>sys.stderr` statement.
            print("writing row:", i * 100000, file=sys.stderr)
    # index the positional columns for fast range queries
    if "txStart" in chunk.columns:
        if "chrom" in chunk.columns:
            ssql = """CREATE INDEX "%s.chrom_txStart" ON "%s" (chrom, txStart)""" % (table, table)
        else:
            ssql = """CREATE INDEX "%s.txStart" ON "%s" (txStart)""" % (table, table)
        self.engine.execute(ssql)
    for index in (indexes or []):
        ssql = """CREATE INDEX "%s.%s" ON "%s" (%s)""" % (table,
            index, table, index)
        self.engine.execute(ssql)
    if bins:
        ssql = """CREATE INDEX "%s.chrom_bin" ON "%s" (chrom, bin)""" % (table, table)
        self.engine.execute(ssql)
    self.session.commit()
open a web-browser to the DAVID online enrichment tool
Parameters
----------
refseq_list : list
list of refseq names to check for enrichment
annot : list
iterable of DAVID annotations to check for enrichment
def david_go(refseq_list, annot=('SP_PIR_KEYWORDS', 'GOTERM_BP_FAT',
                'GOTERM_CC_FAT', 'GOTERM_MF_FAT')):
    """
    open a web-browser to the DAVID online enrichment tool

    Parameters
    ----------

    refseq_list : list
        list of refseq names to check for enrichment

    annot : list
        iterable of DAVID annotations to check for enrichment
    """
    import webbrowser
    URL = "http://david.abcc.ncifcrf.gov/api.jsp?type=REFSEQ_MRNA&ids=%s&tool=term2term&annot="
    # de-duplicate the ids, then append the annotation list after "annot="
    ids = ",".join(set(refseq_list))
    annots = ",".join(annot)
    webbrowser.open((URL % ids) + annots)
perform an efficient spatial query using the bin column if available.
The possible bins are calculated from the `start` and `end` sent to
this function.
Parameters
----------
table : str or table
table to query
chrom : str
chromosome for the query
start : int
0-based start postion
end : int
0-based end position
def bin_query(self, table, chrom, start, end):
    """
    perform an efficient spatial query using the bin column if available.
    The possible bins are calculated from the `start` and `end` sent to
    this function.

    Parameters
    ----------

    table : str or table
        table to query

    chrom : str
        chromosome for the query

    start : int
        0-based start postion

    end : int
        0-based end position
    """
    if isinstance(table, six.string_types):
        table = getattr(self, table)
    # recover the underlying Table from either a mapped class or a Query
    try:
        tbl = table._table
    except AttributeError:
        tbl = table.column_descriptions[0]['type']._table
    query = table.filter(tbl.c.chrom == chrom)
    if hasattr(tbl.c, "bin"):
        # only apply the bin filter when the candidate set is small
        # enough for an IN(...) clause to be a win
        candidate_bins = Genome.bins(start, end)
        if len(candidate_bins) < 100:
            query = query.filter(tbl.c.bin.in_(candidate_bins))
    # positional overlap test against whichever coordinate columns exist
    if hasattr(tbl.c, "txStart"):
        return query.filter(tbl.c.txStart <= end).filter(tbl.c.txEnd >= start)
    return query.filter(tbl.c.chromStart <= end).filter(tbl.c.chromEnd >= start)
Return k-nearest upstream features
Parameters
----------
table : str or table
table against which to query
chrom_or_feat : str or feat
either a chromosome, e.g. 'chr3' or a feature with .chrom, .start,
.end attributes
start : int
if `chrom_or_feat` is a chrom, then this must be the integer start
end : int
if `chrom_or_feat` is a chrom, then this must be the integer end
k : int
number of upstream neighbors to return
def upstream(self, table, chrom_or_feat, start=None, end=None, k=1):
    """
    Return k-nearest upstream features

    Parameters
    ----------

    table : str or table
        table against which to query

    chrom_or_feat : str or feat
        either a chromosome, e.g. 'chr3' or a feature with .chrom, .start,
        .end attributes

    start : int
        if `chrom_or_feat` is a chrom, then this must be the integer start

    end : int
        if `chrom_or_feat` is a chrom, then this must be the integer end

    k : int
        number of upstream neighbors to return
    """
    hits = self.knearest(table, chrom_or_feat, start, end, k, "up")
    # when a feature was passed, take coordinates and strand from it
    end = getattr(chrom_or_feat, "end", end)
    start = getattr(chrom_or_feat, "start", start)
    minus_strand = getattr(chrom_or_feat, "strand", "+") == "-"
    if minus_strand:
        # upstream of a minus-strand feature lies past its end
        return [hit for hit in hits if hit.end > start]
    return [hit for hit in hits if hit.start < end]
Return k-nearest features
Parameters
----------
table : str or table
table against which to query
chrom_or_feat : str or feat
either a chromosome, e.g. 'chr3' or a feature with .chrom, .start,
.end attributes
start : int
if `chrom_or_feat` is a chrom, then this must be the integer start
end : int
if `chrom_or_feat` is a chrom, then this must be the integer end
k : int
number of downstream neighbors to return
_direction : (None, "up", "down")
internal (don't use this)
def knearest(self, table, chrom_or_feat, start=None, end=None, k=1,
            _direction=None):
    """
    Return k-nearest features

    Parameters
    ----------

    table : str or table
        table against which to query

    chrom_or_feat : str or feat
        either a chromosome, e.g. 'chr3' or a feature with .chrom, .start,
        .end attributes

    start : int
        if `chrom_or_feat` is a chrom, then this must be the integer start

    end : int
        if `chrom_or_feat` is a chrom, then this must be the integer end

    k : int
        number of downstream neighbors to return

    _direction : (None, "up", "down")
        internal (don't use this)
    """
    assert _direction in (None, "up", "down")
    # they sent in a feature
    if start is None:
        assert end is None
        chrom, start, end = chrom_or_feat.chrom, chrom_or_feat.start, chrom_or_feat.end
        # if the query is directional and the feature has a strand, flip
        # the direction: on the minus strand "up" and "down" swap meaning
        # in genome coordinates.
        # BUGFIX: both branches previously mapped to "up", so "up" queries
        # on minus-strand features were never flipped to "down".
        if _direction in ("up", "down") and getattr(chrom_or_feat,
                "strand", None) == "-":
            _direction = "up" if _direction == "down" else "down"
    else:
        chrom = chrom_or_feat
    # int() replaces the Python-2-only long(); py3 ints are unbounded and
    # py2 ints auto-promote, so behavior is unchanged.
    qstart, qend = int(start), int(end)
    res = self.bin_query(table, chrom, qstart, qend)
    # geometrically widen the query window until we have k candidates
    i, change = 1, 350
    try:
        while res.count() < k:
            if _direction in (None, "up"):
                if qstart == 0 and _direction == "up": break
                qstart = max(0, qstart - change)
            if _direction in (None, "down"):
                qend += change
            i += 1
            change *= (i + 5)
            res = self.bin_query(table, chrom, qstart, qend)
    except BigException:
        return []
    def dist(f):
        # distance from the original query interval to f (0 if overlapping)
        d = 0
        if start > f.end:
            d = start - f.end
        elif f.start > end:
            d = f.start - end
        return d
    dists = sorted([(dist(f), f) for f in res])
    if len(dists) == 0:
        return []
    dists, res = zip(*dists)
    if len(res) == k:
        return res
    if k > len(res): # had to break because of end of chrom
        if k == 0: return []
        k = len(res)
    ndist = dists[k - 1]
    # include all features that are the same distance as the nth closest
    # feature (accounts for ties).
    while k < len(res) and dists[k] == ndist:
        k = k + 1
    return res[:k]
annotate a file with a number of tables
Parameters
----------
fname : str or file
file name or file-handle
tables : list
list of tables with which to annotate `fname`
feature_strand : bool
if this is True, then the up/downstream designations are based on
the features in `tables` rather than the features in `fname`
in_memory : bool
if True, then tables are read into memory. This usually makes the
annotation much faster if there are more than 500 features in
`fname` and the number of features in the table is less than 100K.
header : str
header to print out (if True, use existing header)
out : file
where to print output
parallel : bool
if True, use multiprocessing library to execute the annotation of
each chromosome in parallel. Uses more memory.
def annotate(self, fname, tables, feature_strand=False, in_memory=False,
             header=None, out=sys.stdout, parallel=False):
    """
    annotate a file with a number of tables

    Parameters
    ----------

    fname : str or file
        file name or file-handle

    tables : list
        list of tables with which to annotate `fname`

    feature_strand : bool
        if this is True, then the up/downstream designations are based on
        the features in `tables` rather than the features in `fname`

    in_memory : bool
        if True, then tables are read into memory. This usually makes the
        annotation much faster if there are more than 500 features in
        `fname` and the number of features in the table is less than 100K.

    header : str
        header to print out (if True, use existing header)

    out : file
        where to print output

    parallel : bool
        if True, use multiprocessing library to execute the annotation of
        each chromosome in parallel. Uses more memory.
    """
    # thin wrapper: the real work lives in the annotate module
    from .annotate import annotate
    return annotate(self, fname, tables, feature_strand, in_memory, header=header,
            out=out, parallel=parallel)
Get all the bin numbers for a particular interval defined by
(start, end]
def bins(start, end):
    """
    Get all the bin numbers for a particular interval defined by
    (start, end]

    Uses the standard UCSC binning scheme (offsets 585/73/9/1 with a
    128kb smallest bin) and returns a frozenset of candidate bin ids.

    Raises BigException for intervals of 2^29 bases or more, which the
    standard scheme cannot represent.
    """
    # guard first; the previous version left a dead
    # `offsets = [4681, ...]` assignment after the raise.
    if end - start >= 536870912:
        raise BigException
    offsets = [585, 73, 9, 1]
    binFirstShift = 17
    binNextShift = 3
    start = start >> binFirstShift
    end = (end - 1) >> binFirstShift
    bins = [1]
    for offset in offsets:
        bins.extend(range(offset + start, offset + end + 1))
        start >>= binNextShift
        end >>= binNextShift
    return frozenset(bins)
write a bed12 file of the query.
Parameters
----------
query : query
a table or query to save to file
filename : file
string or filehandle to write output
def save_bed(cls, query, filename=sys.stdout):
    """
    write a bed12 file of the query.

    Parameters
    ----------

    query : query
        a table or query to save to file

    filename : file
        string or filehandle to write output
    """
    # NOTE(review): _open presumably returns `filename` unchanged when it
    # is already a filehandle -- confirm. The handle is never explicitly
    # closed or flushed here.
    out = _open(filename, 'w')
    for o in query:
        # each row is expected to provide its own .bed() serialization
        out.write(o.bed() + '\n')
For example:
{% staticfile "/js/foo.js" %}
or
{% staticfile "/js/foo.js" as variable_name %}
Or for multiples:
{% staticfile "/foo.js; /bar.js" %}
or
{% staticfile "/foo.js; /bar.js" as variable_name %}
def staticfile_node(parser, token, optimize_if_possible=False):
    """For example:

    {% staticfile "/js/foo.js" %}
    or
    {% staticfile "/js/foo.js" as variable_name %}

    Or for multiples:
    {% staticfile "/foo.js; /bar.js" %}
    or
    {% staticfile "/foo.js; /bar.js" as variable_name %}
    """
    bits = token.split_contents()
    # optional trailing "... as varname" stores the result in the context
    context_name = None
    if len(bits) == 4 and bits[-2] == 'as':
        context_name = bits[-1]
        bits = bits[:-2]
    filename = parser.compile_filter(bits[1])
    return StaticFileNode(filename,
                          symlink_if_possible=_CAN_SYMLINK,
                          optimize_if_possible=optimize_if_possible,
                          context_name=context_name)
works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
def _mkdir(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired " \
"dir, '%s', already exists." % newdir)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
_mkdir(head)
if tail:
os.mkdir(newdir) |
Look for filename in all MEDIA_ROOTS, and return the first one found.
def _find_filepath_in_roots(filename):
    """Look for filename in all MEDIA_ROOTS, and return the first one found.

    Returns a (filepath, root) pair, or (None, None) when not found.
    """
    for root in settings.DJANGO_STATIC_MEDIA_ROOTS:
        filepath = _filename2filepath(filename, root)
        if os.path.isfile(filepath):
            return filepath, root
    # haven't found it in DJANGO_STATIC_MEDIA_ROOTS; look for apps' files if
    # we're in DEBUG mode
    if settings.DEBUG:
        try:
            # staticfiles may not exist on older Django versions, hence
            # the ImportError guard
            from django.contrib.staticfiles import finders
            absolute_path = finders.find(filename)
            if absolute_path:
                root, filepath = os.path.split(absolute_path)
                return absolute_path, root
        except ImportError:
            pass
    # (None, None) signals "not found" to callers
    return None, None
Return a new filename to use as the combined file name for a
bunch of files.
A precondition is that they all have the same file extension
Given that the list of files can have different paths, we aim to use the
most common path.
Example:
/somewhere/else/foo.js
/somewhere/bar.js
/somewhere/different/too/foobar.js
The result will be
/somewhere/foo_bar_foobar.js
Another thing to note, if the filenames have timestamps in them, combine
them all and use the highest timestamp.
def default_combine_filenames_generator(filenames, max_length=40):
    """Return a new filename to use as the combined file name for a
    bunch of files.

    A precondition is that they all have the same file extension.

    Given that the list of files can have different paths, we aim to use the
    most common path.

    Example:
      /somewhere/else/foo.js
      /somewhere/bar.js
      /somewhere/different/too/foobar.js
    The result will be
      /somewhere/foo_bar_foobar.js

    Another thing to note, if the filenames have timestamps in them, combine
    them all and use the highest timestamp.

    :param filenames: list of file paths sharing one extension.
    :param max_length: cap on the combined stem (before the extension).
    :raises ValueError: if the filenames mix extensions.
    """
    path = None
    names = []
    extension = None
    timestamps = []
    for filename in filenames:
        name = os.path.basename(filename)
        if not extension:
            extension = os.path.splitext(name)[1]
        elif os.path.splitext(name)[1] != extension:
            raise ValueError("Can't combine multiple file extensions")

        # strip embedded ".<10-digit timestamp>." markers, remembering the
        # values so the newest one can be re-applied below.
        # BUGFIX: regex is now a raw string so the escapes are literal
        # regex tokens, not subject to string-escape interpretation.
        for each in re.finditer(r'\.\d{10}\.', name):
            timestamps.append(int(each.group().replace('.','')))
            name = name.replace(each.group(), '.')
        name = os.path.splitext(name)[0]
        names.append(name)

        # shortest directory ~= most common ancestor path
        if path is None:
            path = os.path.dirname(filename)
        else:
            if len(os.path.dirname(filename)) < len(path):
                path = os.path.dirname(filename)

    new_filename = '_'.join(names)
    if timestamps:
        new_filename += ".%s" % max(timestamps)
    new_filename = new_filename[:max_length]
    new_filename += extension

    return os.path.join(path, new_filename)
inspect the code and look for files that can be turned into combos.
Basically, the developer could type this:
{% slimall %}
<link href="/one.css"/>
<link href="/two.css"/>
{% endslimall %}
And it should be reconsidered like this:
<link href="{% slimfile "/one.css;/two.css" %}"/>
which we already have routines for doing.
def render(self, context):
    """inspect the code and look for files that can be turned into combos.
    Basically, the developer could type this:
    {% slimall %}
    <link href="/one.css"/>
    <link href="/two.css"/>
    {% endslimall %}
    And it should be reconsidered like this:
    <link href="{% slimfile "/one.css;/two.css" %}"/>
    which we already have routines for doing.
    """
    # Render the wrapped template nodes first; everything below operates
    # on the resulting HTML string.
    code = self.nodelist.render(context)
    if not settings.DJANGO_STATIC:
        # Static processing disabled: optionally just prefix MEDIA_URL.
        # Append MEDIA_URL if set
        # quick and dirty
        if settings.DJANGO_STATIC_MEDIA_URL_ALWAYS:
            for match in STYLES_REGEX.finditer(code):
                for filename in match.groups():
                    code = (code.replace(filename,
                        settings.DJANGO_STATIC_MEDIA_URL + filename))
            for match in SCRIPTS_REGEX.finditer(code):
                for filename in match.groups():
                    code = (code.replace(filename,
                        settings.DJANGO_STATIC_MEDIA_URL + filename))
            return code
        return code

    # Collect every <script> src so they can be combined into one file;
    # the matched tags are stripped and a single combined tag is
    # prepended at the end of the method.
    new_js_filenames = []
    for match in SCRIPTS_REGEX.finditer(code):
        whole_tag = match.group()
        # NOTE(review): async_defer keeps only the value from the *last*
        # matched script tag -- confirm mixing async/defer tags is intended.
        async_defer = ASYNC_DEFER_REGEX.search(whole_tag)
        for filename in match.groups():
            optimize_if_possible = self.optimize_if_possible
            if optimize_if_possible and \
              (filename.endswith('.min.js') or filename.endswith('.minified.js')):
                # Override! Because we simply don't want to run slimmer
                # on files that have the file extension .min.js
                optimize_if_possible = False
            new_js_filenames.append(filename)
        code = code.replace(whole_tag, '')

    # Now, we need to combine these files into one
    if new_js_filenames:
        new_js_filename = _static_file(new_js_filenames,
                                       optimize_if_possible=optimize_if_possible,
                                       symlink_if_possible=self.symlink_if_possible)
    else:
        new_js_filename = None

    # NOTE(review): never populated below; appears vestigial.
    new_image_filenames = []
    def image_replacer(match):
        # Rewrite each <img> source in place (images are never combined).
        tag = match.group()
        for filename in match.groups():
            new_filename = _static_file(filename,
                                        symlink_if_possible=self.symlink_if_possible)
            if new_filename != filename:
                tag = tag.replace(filename, new_filename)
        return tag
    code = IMG_REGEX.sub(image_replacer, code)

    new_css_filenames = defaultdict(list)
    # It's less trivial with CSS because we can't combine those that are
    # of different media
    media_regex = re.compile('media=["\']([^"\']+)["\']')
    for match in STYLES_REGEX.finditer(code):
        whole_tag = match.group()
        try:
            media_type = media_regex.findall(whole_tag)[0]
        except IndexError:
            # no media= attribute: group under the empty key
            media_type = ''
        for filename in match.groups():
            new_css_filenames[media_type].append(filename)
        code = code.replace(whole_tag, '')

    # Now, we need to combine these files into one (per media type)
    new_css_filenames_combined = {}
    if new_css_filenames:
        for media_type, filenames in new_css_filenames.items():
            r = _static_file(filenames,
                             optimize_if_possible=self.optimize_if_possible,
                             symlink_if_possible=self.symlink_if_possible)
            new_css_filenames_combined[media_type] = r

    if new_js_filename:
        # Now is the time to apply the name prefix if there is one
        if async_defer:
            new_tag = ('<script %s src="%s"></script>' %
                       (async_defer.group(0), new_js_filename))
        else:
            new_tag = '<script src="%s"></script>' % new_js_filename
        # combined tags are prepended to the rendered output
        code = "%s%s" % (new_tag, code)

    for media_type, new_css_filename in new_css_filenames_combined.items():
        extra_params = ''
        if media_type:
            extra_params += ' media="%s"' % media_type
        new_tag = '<link rel="stylesheet"%s href="%s"/>' % \
          (extra_params, new_css_filename)
        code = "%s%s" % (new_tag, code)
    return code
check for overlap with the other interval
def overlaps(self, other):
    """Return True if this interval shares any bases with `other`."""
    if self.chrom != other.chrom:
        return False
    # neither interval may start at or after the other's end
    return other.start < self.end and self.start < other.end
check if this is upstream of the `other` interval taking the strand of
the other interval into account
def is_upstream_of(self, other):
    """
    Check whether this interval is upstream of `other`, taking the strand
    of `other` into account.  Returns None when chromosomes differ.
    """
    if self.chrom != other.chrom:
        return None
    other_strand = getattr(other, "strand", None)
    if other_strand == "+":
        return self.end <= other.start
    # minus (or unknown) strand: upstream means past the other's end
    return self.start >= other.end
check the distance between this and another interval
Parameters
----------
other_or_start : Interval or int
either an integer or an Interval with a start attribute indicating
the start of the interval
end : int
if `other_or_start` is an integer, this must be an integer
indicating the end of the interval
features : bool
if True, the features, such as CDS, intron, etc. that this feature
overlaps are returned.
def distance(self, other_or_start=None, end=None, features=False):
    """
    Check the distance between this and another interval.

    Parameters
    ----------
    other_or_start : Interval or int
        either an integer or an Interval with a start attribute indicating
        the start of the interval
    end : int
        if `other_or_start` is an integer, this must be an integer
        indicating the end of the interval
    features : bool
        accepted for interface compatibility; not used by this
        implementation

    Returns 0 when the intervals touch or overlap, otherwise the gap in
    bases between them.
    """
    if end is None:
        # a full interval was passed; it must share this chromosome
        assert other_or_start.chrom == self.chrom
    o_start, o_end = get_start_end(other_or_start, end)
    if self.start > o_end:
        return self.start - o_end
    if o_start > self.end:
        return o_start - self.end
    return 0
return a list of exons [(start, stop)] for this object if appropriate
def exons(self):
    """
    Return the exons as (start, stop) pairs for this object if appropriate.

    Returns [] for non-genepred records.  Note the value is whatever
    ``zip`` yields: a list on Python 2, an iterator on Python 3.
    """
    # drop the trailing comma: UCSC stores exon coordinates as
    # comma-separated strings ending in ",", so [:-1] removes the empty
    # final field before splitting.
    if not self.is_gene_pred: return []
    if hasattr(self, "exonStarts"):
        # NOTE(review): `long` must be provided/aliased elsewhere for
        # Python 3 -- confirm.
        try:
            starts = (long(s) for s in self.exonStarts[:-1].split(","))
            ends = (long(s) for s in self.exonEnds[:-1].split(","))
        except TypeError:
            # presumably the columns came back as bytes; decode first
            starts = (long(s) for s in self.exonStarts[:-1].decode().split(","))
            ends = (long(s) for s in self.exonEnds[:-1].decode().split(","))
    else: # it is bed12
        # bed12 stores block starts relative to self.start plus block
        # sizes; convert both to absolute chromosome coordinates.
        starts = [self.start + long(s) for s in
                  self.chromStarts[:-1].decode().split(",")]
        ends = [starts[i] + long(size) for i, size \
                in enumerate(self.blockSizes[:-1].decode().split(","))]
    return zip(starts, ends)
return a list of features for the gene features of this object.
This would include exons, introns, utrs, etc.
def gene_features(self):
    """
    return a list of features for the gene features of this object.
    This would include exons, introns, utrs, etc.

    Each element is a tuple:
        (chrom, start, end, gene_name, strand, feature_type)
    and the list is sorted by start position.
    """
    nm, strand = self.gene_name, self.strand
    # the gene itself spans the full record
    feats = [(self.chrom, self.start, self.end, nm, strand, 'gene')]
    for feat in ('introns', 'exons', 'utr5', 'utr3', 'cdss'):
        # singularize the label: 'introns' -> 'intron', etc.
        fname = feat[:-1] if feat[-1] == 's' else feat
        res = getattr(self, feat)
        # skip feature types absent for this record (utr5/utr3 can be
        # (None, None))
        if res is None or all(r is None for r in res): continue
        # utr5/utr3 return a single (start, end) pair; wrap for uniformity
        if not isinstance(res, list): res = [res]
        feats.extend((self.chrom, s, e, nm, strand, fname) for s, e in res)
    # single-base TSS point plus the default promoter window
    tss = self.tss(down=1)
    if tss is not None:
        feats.append((self.chrom, tss[0], tss[1], nm, strand, 'tss'))
        prom = self.promoter()
        feats.append((self.chrom, prom[0], prom[1], nm, strand, 'promoter'))
    return sorted(feats, key=itemgetter(1))
Return a start, end tuple of positions around the transcription-start
site
Parameters
----------
up : int
if greater than 0, the strand is used to add this many upstream
bases in the appropriate direction
down : int
if greater than 0, the strand is used to add this many downstream
bases into the gene.
def tss(self, up=0, down=0):
    """
    Return a (start, end) tuple of positions around the
    transcription-start site.

    Parameters
    ----------
    up : int
        if greater than 0, the strand is used to add this many upstream
        bases in the appropriate direction
    down : int
        if greater than 0, the strand is used to add this many downstream
        bases into the gene.

    Returns None for non-genepred records; positions are clamped at 0.
    """
    if not self.is_gene_pred:
        return None
    # the TSS is txEnd on the minus strand, txStart otherwise
    site = self.txEnd if self.strand == '-' else self.txStart
    if self.strand == '+':
        start, end = site - up, site + down
    else:
        start, end = site - down, site + up
    return max(0, start), max(end, start, 0)
Return a start, end tuple of positions for the promoter region of this
gene
Parameters
----------
up : int
this distance upstream that is considered the promoter
down : int
the strand is used to add this many downstream bases into the gene.
def promoter(self, up=2000, down=0):
    """
    Return a (start, end) tuple for the promoter region of this gene.

    Parameters
    ----------
    up : int
        the distance upstream that is considered the promoter
    down : int
        the strand is used to add this many downstream bases into the gene

    Returns None for non-genepred records; otherwise delegates to tss().
    """
    if self.is_gene_pred:
        return self.tss(up=up, down=down)
    return None
includes the entire exon as long as any of it is > cdsStart and <
cdsEnd
def coding_exons(self):
    """
    Return the exons that overlap the CDS.  An exon is included in full
    as long as any part of it is > cdsStart and < cdsEnd.
    """
    # the trailing comma in exonStarts/exonEnds leaves an empty final
    # field; drop the last character before splitting
    exon_starts = [long(x) for x in self.exonStarts[:-1].split(",")]
    exon_ends = [long(x) for x in self.exonEnds[:-1].split(",")]
    kept = []
    for exon in zip(exon_starts, exon_ends):
        if exon[1] > self.cdsStart and exon[0] < self.cdsEnd:
            kept.append(exon)
    return kept
just the parts of the exons that are translated
def cds(self):
    """Return just the parts of the exons that are translated:
    coding_exons with the first and last clipped to the CDS bounds."""
    regions = self.coding_exons
    if not regions:
        return regions
    # clip the outermost exons to the CDS boundaries (in place)
    regions[0] = (self.cdsStart, regions[0][1])
    regions[-1] = (regions[-1][0], self.cdsEnd)
    assert all(s < e for s, e in regions)
    return regions
return a boolean indicating whether this feature is downstream of
`other` taking the strand of other into account
def is_downstream_of(self, other):
    """
    Return a boolean indicating whether this feature is downstream of
    `other`, taking the strand of `other` into account.  Returns None
    when chromosomes differ.
    """
    if self.chrom != other.chrom:
        return None
    # on the minus strand "downstream" means before the other's start
    on_minus = getattr(other, "strand", None) == "-"
    return self.end <= other.start if on_minus else self.start >= other.end
return e.g. "intron;exon" if the other_start, end overlap introns and
exons
def features(self, other_start, other_end):
    """
    return e.g. "intron;exon" if the other_start, end overlap introns and
    exons

    Returns a list of overlapped feature labels; labels are prefixed
    with 'nc_' for non-coding transcripts.
    """
    # completely encases gene.
    # (cdsStart == cdsEnd marks a non-coding transcript)
    if other_start <= self.start and other_end >= self.end:
        return ['gene' if self.cdsStart != self.cdsEnd else 'nc_gene']
    other = Interval(other_start, other_end)
    ovls = []
    # the transcription start site is at txEnd on the minus strand
    tx = 'txEnd' if self.strand == "-" else 'txStart'
    if hasattr(self, tx) and other_start <= getattr(self, tx) <= other_end \
            and self.cdsStart != self.cdsEnd:
        ovls = ["TSS"]
    for ftype in ('introns', 'exons', 'utr5', 'utr3', 'cdss'):
        feats = getattr(self, ftype)
        # utr5/utr3 return a single (start, end) pair; wrap for uniformity
        if not isinstance(feats, list): feats = [feats]
        if any(Interval(f[0], f[1]).overlaps(other) for f in feats):
            # report the singular label, e.g. 'introns' -> 'intron'
            ovls.append(ftype[:-1] if ftype[-1] == 's' else ftype)
    if 'cds' in ovls:
        # a cds hit implies an exon hit; keep only the more specific label
        ovls = [ft for ft in ovls if ft != 'exon']
    if self.cdsStart == self.cdsEnd:
        # non-coding transcript: prefix every label
        ovls = ['nc_' + ft for ft in ovls]
    return ovls
return the (start, end) of the region before the geneStart
def upstream(self, distance):
    """
    Return the (start, end) of the `distance`-base region before the gene
    start, using strand to pick the direction (delegates to _xstream).
    """
    if getattr(self, "strand", None) == "+":
        start, end = self.start - distance, self.start
    else:
        start, end = self.end, self.end + distance
    return self._xstream(start, end)
return the 5' UTR if appropriate
def utr5(self):
    """
    Return the (start, end) of the 5' UTR, or (None, None) when the
    transcript is non-coding, has fewer than 2 exons, or the UTR is empty.
    """
    if not self.is_coding or len(self.exons) < 2:
        return (None, None)
    if self.strand == "+":
        region = (self.txStart, self.cdsStart)
    else:
        region = (self.cdsEnd, self.txEnd)
    # a zero-length UTR collapses to (None, None)
    return (None, None) if region[0] == region[1] else region
Return the sequence for this feature.
if per-exon is True, return an array of exon sequences
This sequence is never reverse complemented
def sequence(self, per_exon=False):
    """
    Return the sequence for this feature; if per_exon is True, return a
    list with one sequence per exon.

    This sequence is never reverse complemented.
    """
    db = self.db
    if per_exon:
        # TODO: use same strategy as cds_sequence to reduce # of requests.
        # start + 1: _sequence presumably takes 1-based coordinates --
        # confirm against _sequence's contract.
        return [_sequence(db, self.chrom, s + 1, e) for s, e in self.exons]
    return _sequence(db, self.chrom, self.txStart + 1, self.txEnd)
perform an NCBI blast against the sequence of this feature
def ncbi_blast(self, db="nr", megablast=True, sequence=None):
    """
    perform an NCBI blast against the sequence of this feature

    Parameters
    ----------
    db : str
        NCBI database to search (default "nr")
    megablast : bool
        NOTE(review): unused; MEGABLAST=True is always sent below
    sequence : None, "cds" or "mrna"
        which sequence of this feature to submit (None uses the full
        feature sequence)

    This is a generator that yields parsed records from the NCBI text
    report.
    """
    import requests
    requests.defaults.max_retries = 4
    assert sequence in (None, "cds", "mrna")
    seq = self.sequence() if sequence is None else ("".join(self.cds_sequence if sequence == "cds" else self.mrna_sequence))
    # submit the query (CMD=Put); the reply contains a request id (RID)
    # used to poll for results
    r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi',
                      timeout=20,
                      data=dict(
                          PROGRAM="blastn",
                          #EXPECT=2,
                          DESCRIPTIONS=100,
                          ALIGNMENTS=0,
                          FILTER="L", # low complexity
                          CMD="Put",
                          MEGABLAST=True,
                          DATABASE=db,
                          QUERY=">%s\n%s" % (self.name, seq)
                      )
                  )
    if not ("RID =" in r.text and "RTOE" in r.text):
        print("no results", file=sys.stderr)
        # NOTE(review): raising StopIteration inside a generator becomes
        # RuntimeError under PEP 479 (Python 3.7+); use `return` if ported.
        raise StopIteration
    rid = r.text.split("RID = ")[1].split("\n")[0]
    import time
    time.sleep(4)
    print("checking...", file=sys.stderr)
    # poll (CMD=Get) until NCBI stops reporting Status=WAITING
    r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi',
                      data=dict(RID=rid, format="Text",
                                DESCRIPTIONS=100,
                                DATABASE=db,
                                CMD="Get", ))
    while "Status=WAITING" in r.text:
        print("checking...", file=sys.stderr)
        time.sleep(10)
        r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi',
                          data=dict(RID=rid, format="Text",
                                    CMD="Get", ))
    for rec in _ncbi_parse(r.text):
        yield rec
make a request to the genome-browsers BLAT interface
sequence is one of None, "mrna", "cds"
returns a list of features that are hits to this sequence.
def blat(self, db=None, sequence=None, seq_type="DNA"):
    """
    Make a request to the genome-browser's BLAT interface.

    sequence is one of None, "mrna", "cds"; None submits the full feature
    sequence.  Returns a list of features that are hits to this sequence.
    """
    from . blat_blast import blat, blat_all
    assert sequence in (None, "cds", "mrna")
    if sequence == "cds":
        seq = "".join(self.cds_sequence)
    elif sequence == "mrna":
        seq = "".join(self.mrna_sequence)
    else:
        seq = self.sequence()
    # a tuple/list of databases fans out to blat_all
    if isinstance(db, (tuple, list)):
        return blat_all(seq, self.gene_name, db, seq_type)
    return blat(seq, self.gene_name, db or self.db, seq_type)
return a bed formatted string of this feature
def bed(self, *attrs, **kwargs):
    """
    Return a BED-formatted (tab-separated) string of this feature.

    Extra attribute names in `attrs` are appended as columns; genepred
    records delegate to bed12().
    """
    if self.is_gene_pred:
        return self.bed12(**kwargs)
    # coordinates are always emitted first, so skip coordinate-like
    # attribute names the caller may have passed
    skip = ("chrom", "start", "end", "txStart", "txEnd", "chromStart",
            "chromEnd")
    fields = [self.chrom, self.start, self.end]
    fields.extend(getattr(self, a) for a in attrs if a not in skip)
    return "\t".join(str(f) for f in fields)
return a bed12 (http://genome.ucsc.edu/FAQ/FAQformat.html#format1)
representation of this interval
def bed12(self, score="0", rgb="."):
    """
    Return this interval formatted as a BED12 line
    (http://genome.ucsc.edu/FAQ/FAQformat.html#format1).
    """
    if not self.is_gene_pred:
        raise CruzException("can't create bed12 from non genepred feature")
    exon_list = list(self.exons)
    # BED12 wants per-block sizes and starts (relative to txStart), each
    # comma-separated with a trailing comma
    sizes = ",".join(str(e - s) for s, e in exon_list) + ","
    rel_starts = ",".join(str(s - self.txStart) for s, e in exon_list) + ","
    if hasattr(self, "name2"):
        label = self.name2 + "," + self.name
    else:
        label = self.name
    fields = (self.chrom, self.txStart, self.txEnd, label, score,
              self.strand, self.cdsStart, self.cdsEnd, rgb,
              len(exon_list), sizes, rel_starts)
    return "\t".join(str(f) for f in fields)
convert global coordinate(s) to local taking
introns into account and cds/tx-Start depending on cdna=True kwarg
def localize(self, *positions, **kwargs):
    """
    Convert global genomic coordinate(s) to local coordinates within this
    feature, taking introns into account.

    Parameters
    ----------
    positions : int(s)
        one or more chromosomal positions
    cdna : bool (keyword)
        if True, coordinates are relative to cdsStart/cdsEnd instead of
        start/end, and non-coding transcripts localize to None

    Returns a single value for one position, otherwise a list.  Positions
    outside the transcript or inside an intron map to None.
    """
    cdna = kwargs.get('cdna', False)
    # TODO: account for strand ?? add kwarg ??
    # if it's to the CDNA, then it's based on the cdsStart
    start, end = (self.cdsStart, self.cdsEnd) if cdna else \
        (self.start, self.end)
    introns = self.introns or None
    if cdna:
        if not self.is_coding:
            return ([None] * len(positions)) if len(positions) > 1 else None
        introns = self._introns(self.cds) or None

    if introns is None:
        # no introns: a plain offset from the feature start suffices
        local_ps = [p - start if (start <= p < end) else None for p in positions]
        return local_ps[0] if len(positions) == 1 else local_ps

    introns = [(s - start, e - start) for s, e in introns]
    positions = [p - start for p in positions]
    # now both introns and positions are local starts based on cds/tx-Start
    local_ps = []
    l = end - start
    for original_p in positions:
        subtract = 0
        p = original_p
        # (stray debug print to stderr removed here)
        if p < 0 or p >= l: # outside of transcript
            local_ps.append(None)
            continue
        for s, e in introns:
            # within intron
            if s <= p <= e:
                subtract = None
                break
            # otherwise, adjust for intron length.
            elif p >= e:
                subtract += (e - s)
        local_ps.append(p - subtract if subtract is not None else None)
    assert all(p is None or p >= 0 for p in local_ps), (local_ps)
    return local_ps[0] if len(positions) == 1 else local_ps
check the distance between this and another interval
Parameters
----------
other_or_start : Interval or int
either an integer or an Interval with a start attribute indicating
the start of the interval
end : int
if `other_or_start` is an integer, this must be an integer
indicating the end of the interval
features : bool
if True, the features, such as CDS, intron, etc. that this feature
overlaps are returned.
def distance(self, other_or_start=None, end=None, features="unused",
             shore_dist=3000):
    """
    Check the distance between this and another interval.

    Parameters
    ----------
    other_or_start : Interval or int
        either an integer or an Interval with a start attribute indicating
        the start of the interval
    end : int
        if `other_or_start` is an integer, this must be an integer
        indicating the end of the interval
    features : unused
        kept only so the signature matches Feature.distance

    Returns a (distance, label) tuple where label is "island" for
    overlapping intervals, "shore" within `shore_dist` bases, else "".
    """
    if end is None:
        # a full interval was passed; it must share this chromosome
        assert other_or_start.chrom == self.chrom
    o_start, o_end = get_start_end(other_or_start, end)
    if o_start > self.end:
        gap = o_start - self.end
    elif self.start > o_end:
        gap = self.start - o_end
    else:
        gap = 0
    assert gap >= 0
    if gap == 0:
        return (0, "island")
    return (gap, "shore" if gap <= shore_dist else "")
annotate bed file in fname with tables.
distances are integers for distance. and intron/exon/utr5 etc for gene-pred
tables. if the annotation features have a strand, the distance reported is
negative if the annotation feature is upstream of the feature in question
if feature_strand is True, then the distance is negative if t
def annotate(g, fname, tables, feature_strand=False, in_memory=False,
             header=None, out=sys.stdout, _chrom=None, parallel=False):
    """
    annotate bed file in fname with tables.
    distances are integers for distance. and intron/exon/utr5 etc for gene-pred
    tables. if the annotation features have a strand, the distance reported is
    negative if the annotation feature is upstream of the feature in question;
    if feature_strand is True, then the distance is negative if the bed
    feature is upstream of the annotation feature instead (NOTE(review):
    the original docstring was truncated here -- confirm this reading
    against the strands computation below).

    g may be a Genome instance or a database name string; out may be a
    file object or a path.  Returns out.name.
    """
    close = False
    if isinstance(out, basestring):
        # a path was given: open it ourselves and remember to close it
        out = nopen(out, "w")
        close = True
    if parallel:
        # fan out one annotate() call per chromosome, merging results as
        # they complete
        import multiprocessing
        import signal
        # workers ignore SIGINT so Ctrl-C is handled by the parent only
        p = multiprocessing.Pool(initializer=lambda:
                signal.signal(signal.SIGINT, signal.SIG_IGN))
        chroms = _split_chroms(fname)
        def write_result(fanno, written=[False]):
            # mutable-default `written` deliberately persists across
            # callbacks so the header is printed exactly once
            for i, d in enumerate(reader(fanno, header="ordered")):
                if i == 0 and written[0] == False:
                    print >>out, "\t".join(d.keys())
                    written[0] = True
                print >>out, "\t".join(x if x else "NA" for x in d.values())
            # remove the per-chromosome temp files once merged
            os.unlink(fanno)
            os.unlink(fanno.replace(".anno", ""))
        for fchrom, (fout, fanno) in chroms:
            # pass g.db (a string) so each worker reconnects on its own
            p.apply_async(annotate, args=(g.db, fout.name, tables, feature_strand, True,
                                          header, fanno, fchrom),
                          callback=write_result)
        p.close()
        p.join()
        return out.name
    if isinstance(g, basestring):
        from . import Genome
        g = Genome(g)
    if in_memory:
        from . intersecter import Intersecter
        intersecters = [] # 1 per table.
        for t in tables:
            # tables may be names (str) or already-built query objects
            q = getattr(g, t) if isinstance(t, basestring) else t
            if _chrom is not None:
                q = q.filter_by(chrom=_chrom)
            table_iter = q #page_query(q, g.session)
            intersecters.append(Intersecter(table_iter))
    elif isinstance(fname, basestring) and os.path.exists(fname) \
            and sum(1 for _ in nopen(fname)) > 25000:
        print >>sys.stderr, "annotating many intervals, may be faster using in_memory=True"
    if header is None:
        header = []
    extra_header = []
    for j, toks in enumerate(reader(fname, header=False)):
        if j == 0 and not header:
            # first row with non-numeric start/end columns is a header
            if not (toks[1] + toks[2]).isdigit():
                header = toks
        if j == 0:
            # build the extra annotation columns, one group per table
            for t in tables:
                annos = (getattr(g, t) if isinstance(t, basestring) else t).first().anno_cols
                h = t if isinstance(t, basestring) else t._table.name if hasattr(t, "_table") else t.first()._table.name
                extra_header += ["%s_%s" % (h, a) for a in annos]
            if 0 != len(header):
                if not header[0].startswith("#"):
                    header[0] = "#" + header[0]
                print >>out, "\t".join(header + extra_header)
            # don't annotate the header row itself
            if header == toks: continue
        if not isinstance(toks, ABase):
            # plain text row: wrap it in a Feature for the interval ops
            f = Feature()
            f.chrom = toks[0]
            f.txStart = int(toks[1])
            f.txEnd = int(toks[2])
            try:
                f.strand = toks[header.index('strand')]
            except ValueError:
                # no strand column; Feature keeps its default
                pass
        else:
            f = toks
            # for now, use the objects str to get the columns
            # might want to use getattr on the original cols
            toks = f.bed(*header).split("\t")
        # temporary separator, assumed not to occur in names/labels
        sep = "^*^"
        for ti, tbl in enumerate(tables):
            # k=1 nearest feature(s) from the table
            if in_memory:
                objs = intersecters[ti].knearest(int(toks[1]), int(toks[2]), chrom=toks[0], k = 1)
            else:
                objs = g.knearest(tbl, toks[0], int(toks[1]), int(toks[2]), k=1)
            if len(objs) == 0:
                # no hit: emit empty annotation columns
                print >>out, "\t".join(toks + ["", "", ""])
                continue
            gp = hasattr(objs[0], "exonStarts")
            names = [o.gene_name for o in objs]
            # direction of each hit relative to the query (see docstring)
            if feature_strand:
                strands = [-1 if f.is_upstream_of(o) else 1 for o in objs]
            else:
                strands = [-1 if o.is_upstream_of(f) else 1 for o in objs]
            # dists can be a list of tuples where the 2nd item is something
            # like 'island' or 'shore'
            dists = [o.distance(f, features=gp) for o in objs]
            pure_dists = [d[0] if isinstance(d, (tuple, list)) else d for d in dists]
            # convert to negative if the feature is upstream of the query
            for i, s in enumerate(strands):
                if s == 1: continue
                if isinstance(pure_dists[i], basestring): continue
                pure_dists[i] *= -1
            # flatten (distance, label...) tuples back into strings
            for i, (pd, d) in enumerate(zip(pure_dists, dists)):
                if isinstance(d, tuple):
                    if len(d) > 1:
                        dists[i] = "%s%s%s" % (pd, sep, sep.join(d[1:]))
                else:
                    dists[i] = pd
            # keep unique name, dist combinations (occurs because of
            # transcripts)
            name_dists = set(["%s%s%s" % (n, sep, d) \
                    for (n, d) in zip(names, dists)])
            name_dists = [nd.split(sep) for nd in name_dists]
            # just take the first gene name if they are all the same
            if len(set(nd[0] for nd in name_dists)) == 1:
                toks.append(name_dists[0][0])
            else:
                toks.append(";".join(nd[0] for nd in name_dists))
            # iterate over the feat type, dist cols
            for i in range(1, len(name_dists[0])):
                toks.append(";".join(nd[i] for nd in name_dists))
        print >>out, "\t".join(toks)
    if close:
        out.close()
    return out.name
External entry point which calls main() and
if Stop is raised, calls sys.exit()
def entry_point():
    """
    External entry point which calls main() and
    if Stop is raised, calls sys.exit() with the Stop's return code.
    """
    try:
        # register every sub-command with the CLI dispatcher
        main("omego", items=[
            (InstallCommand.NAME, InstallCommand),
            (UpgradeCommand.NAME, UpgradeCommand),
            (ConvertCommand.NAME, ConvertCommand),
            (DownloadCommand.NAME, DownloadCommand),
            (DbCommand.NAME, DbCommand),
            (Version.NAME, Version)])
    except Stop, stop:
        # Stop carries a return code (rc); non-zero means failure
        if stop.rc != 0:
            print "ERROR:", stop
        else:
            print stop
        sys.exit(stop.rc)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.