text stringlengths 0 1.05M | meta dict |
|---|---|
"""A scene object manages a TVTK scene and objects in it.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Event, List, Str, Instance
from traitsui.api import View, Group, Item
from apptools.persistence.state_pickler import set_state
# Local imports.
from tvtk.pyface.tvtk_scene import TVTKScene
from mayavi.core.base import Base
from mayavi.core.source import Source
from mayavi.core.common import handle_children_state, exception
from mayavi.core.adder_node import SourceAdderNode
######################################################################
# `Scene` class.
######################################################################
class Scene(Base):
    """ The Mayavi scene class.

    Root pipeline node that wraps a TVTK render window (``scene``) and
    manages the ``Source`` children that are added to it.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    # The scene (RenderWindow) associated with this component -- we
    # redeclare it here just to be able to record this scene, we don't
    # want it recorded on all objects since the scene is shared
    # (although it isn't an error to register an object twice with the
    # recorder).
    scene = Instance(TVTKScene, record=True)

    # The source objects associated with this object.
    children = List(Source, record=True)

    # The name of this scene.
    name = Str('TVTK Scene')

    # The icon displayed for this node in the pipeline tree view.
    icon = Str('scene.ico')

    # The human-readable type for this object.
    type = Str(' scene')

    # The objects view.
    view = View(Group(Item(name='scene', style='custom'),
                      show_labels=False)
                )

    # The adder node dialog class.
    _adder_node_class = SourceAdderNode

    # The dispatcher used to register callbacks on mouse pick.
    _mouse_pick_dispatcher = Instance(
        'mayavi.core.mouse_pick_dispatcher.MousePickDispatcher',
        record=False)

    ######################################################################
    # `object` interface
    ######################################################################
    def __get_pure_state__(self):
        """Return the picklable state of this object."""
        # Base removes the scene, but we need to save it!
        d = super(Scene, self).__get_pure_state__()
        d['scene'] = self.scene
        # The dispatcher holds live callback state and is rebuilt on
        # demand via its trait default, so it must not be pickled.
        d.pop('_mouse_pick_dispatcher', None)
        return d

    def __set_pure_state__(self, state):
        """Restore state that was saved with __get_pure_state__."""
        # Create/remove children so that they match the saved state
        # before the state values are applied to them.
        handle_children_state(self.children, state.children)
        # Now set our complete state. Doing the scene last ensures
        # that the camera view is set right.
        set_state(self, state, last=['scene'])

    ######################################################################
    # `Scene` interface
    ######################################################################
    def on_mouse_pick(self, callback, type='point', button='Left',
                      remove=False):
        """ Add a picking callback on mouse click.

        When the mouse button is press, object picking is called, and
        the given callback is invoked with the resulting pick
        as an argument.

        **Keyword arguments**

        :type: 'point', 'cell', or 'world'
            The picker type used for picking.
        :button: 'Left', 'Middle', or 'Right'
            The mouse button triggering the picking event.
        :remove: boolean
            If remove is True, the callback is removed from the
            list of callbacks.

        **Returns**

        picker: a tvtk picker
            The picker that will be used to do the picking.

        **Notes**

        The callback must accept one argument: the TVTK picker.
        The same callback can be added multiple times.
        """
        # Callbacks are keyed on (callback, type, button) so that the same
        # function can be registered independently per picker/button.
        key = (callback, type, button)
        if remove:
            self._mouse_pick_dispatcher.callbacks.remove(key)
        else:
            self._mouse_pick_dispatcher.callbacks.append(key)
        return self._mouse_pick_dispatcher._active_pickers[type]

    ######################################################################
    # `Base` interface
    ######################################################################
    def start(self):
        """This is invoked when this object is added to the mayavi
        pipeline.
        """
        # Do nothing if we are already running.
        if self.running:
            return
        # Start all our children.
        for obj in self.children:
            obj.start()
        # Disallow the hide action in the context menu: hiding a whole
        # scene node makes no sense.
        self._HideShowAction.enabled = False
        super(Scene, self).start()

    def stop(self):
        """Invoked when this object is removed from the mayavi
        pipeline.
        """
        if not self.running:
            return
        # Disable rendering to accelerate shutting down.
        scene = self.scene
        if scene is not None:
            status = scene.disable_render
            scene.disable_render = True
        try:
            # Stop all our children.
            for obj in self.children:
                obj.stop()
        finally:
            # Re-enable rendering (restore the previous flag, which may
            # itself have been True).
            if scene is not None:
                scene.disable_render = status
        self.scene = None
        super(Scene, self).stop()

    def add_child(self, child):
        """This method intelligently adds a child to this object in
        the MayaVi pipeline.
        """
        # Appending triggers _children_items_changed, which wires up the
        # child's scene/parent and starts it if we are running.
        self.children.append(child)

    def remove_child(self, child):
        """Remove specified child from our children.
        """
        self.children.remove(child)

    def remove(self):
        """Remove ourselves from the mayavi pipeline.
        """
        if self.parent is not None:
            self.stop()
            # The parent (engine) owns the scene window and closes it.
            self.parent.close_scene(self)

    ######################################################################
    # `TreeNodeObject` interface
    ######################################################################
    def tno_can_add(self, node, add_object):
        """ Returns whether a given object is droppable on the node.
        """
        # add_object may be either a class (issubclass) or an instance
        # (isinstance raises TypeError on non-classes, hence the EAFP).
        try:
            if issubclass(add_object, Source):
                return True
        except TypeError:
            if isinstance(add_object, Source):
                return True
        return False

    def tno_drop_object(self, node, dropped_object):
        """ Returns a droppable version of a specified object.
        """
        # Only Source instances can be dropped; anything else yields None.
        if isinstance(dropped_object, Source):
            return dropped_object

    ######################################################################
    # Non-public interface
    ######################################################################
    def _children_changed(self, old, new):
        # Static trait handler: the whole list was replaced.
        self._handle_children(old, new)

    def _children_items_changed(self, list_event):
        # Static trait handler: items were added/removed in place.
        self._handle_children(list_event.removed, list_event.added)

    def _handle_children(self, removed, added):
        """Stop removed children and adopt/start added ones."""
        for obj in removed:
            obj.stop()
        for obj in added:
            obj.set(scene=self.scene, parent=self)
            if self.running:
                # It makes sense to start children only if we are running.
                # If not, the children will be started when we start.
                try:
                    obj.start()
                except:
                    # Report but do not propagate: one bad child should
                    # not break adding the rest.
                    exception()

    def _menu_helper_default(self):
        # Trait default: lazily build the context-menu helper.
        from mayavi.core.traits_menu import SourceMenuHelper
        return SourceMenuHelper(object=self)

    def __mouse_pick_dispatcher_default(self):
        # Trait default: lazily build the pick dispatcher (import is local
        # to avoid a circular import at module load time).
        from mayavi.core.mouse_pick_dispatcher import \
            MousePickDispatcher
        return MousePickDispatcher(scene=self)
| {
"repo_name": "liulion/mayavi",
"path": "mayavi/core/scene.py",
"copies": "3",
"size": "7730",
"license": "bsd-3-clause",
"hash": -2380652064266778600,
"line_mean": 33.0528634361,
"line_max": 74,
"alpha_frac": 0.5263906856,
"autogenerated": false,
"ratio": 4.759852216748769,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6786242902348769,
"avg_score": null,
"num_lines": null
} |
"""A scheduler that controls the execution of multiple "tasks"
"""
import os
import subprocess
import time
import signal
from collections import deque
import heapq
import logging
import StringIO
from inspect import isfunction, ismethod
# logging
# NOTE: configuring the root logger at import time is a deliberate module
# side effect; every scheduler message uses this timestamped format.
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s: %(message)s")
# commands passed from task to scheduler
# FIXME this is different category that should not be used directly by tasks
class TaskFinished(object):
    """Sentinel operation: the yielding task has finished running."""
class TaskSleep(object):
    """Operation: sleep the current task until its scheduled time."""

    def __init__(self, delay=None):
        """@param delay (float): seconds from now; None keeps the task's
        current scheduled time."""
        self.delay = delay
#TODO: merge sleep/pause
class TaskPause(object):
    """Operation: pause the current task until the given task terminates."""

    def __init__(self, tid=None):
        """@param tid (int): task id to wait for; None waits unconditionally."""
        self.tid = tid
class TaskCancel(object):
    """Operation: cancel the task with the given id."""

    def __init__(self, tid):
        """@param tid (int): id of the task to cancel."""
        self.tid = tid
###################################################
class Task(object):
    """Base for all tasks

    @cvar tid_counter (int): provides an unique "task id" for every task
    @ivar tid (int): task id
    @ivar name (str): task name (not functional role, just info)
    @ivar scheduled (float): when this task should be executed,
                             in seconds since epoch (as given by time.time)
    @ivar lock (str): name of the lock this task holds while running
    @ivar dependents (list - int): list of tasks that are waiting for this
                                   task to be finished.

    This class represents an activity that is run controlled by the Scheduler.
    There are two ways to specify the activity: by passing a callable object to
    the constructor, or by overriding the run() method in a subclass.
    The activity function might be a simple method or a coroutine.
    (shamelessly copied from python threading)
    """

    tid_counter = 0

    def __init__(self, run_method=None, name=None, scheduled=None, lock=None):
        # Task ids come from a class-level counter, so they are unique per
        # process (NOTE(review): not thread-safe -- presumably the scheduler
        # is single-threaded apart from signal handlers; confirm).
        Task.tid_counter += 1
        self.tid = Task.tid_counter
        self.name = name
        self.scheduled = scheduled
        self.lock = lock
        self.dependents = []
        if run_method is not None:
            self.run = run_method
        assert self.run #TODO
        # When run is a generator function, run_iteration() drives it
        # step-by-step through the coroutine protocol.
        self._coroutine = None
        if self.isgeneratorfunction(self.run):
            self._coroutine = self.run()
        self._started = False # started running
        self.cancelled = False

    def __str__(self):
        class_name = self.__class__.__name__
        if self.name:
            return "%s-%s(%s)" % (self.tid, self.name, class_name)
        return "%s-%s" % (self.tid, class_name)

    def __cmp__(self, other):
        """comparison based on scheduled time

        Used by the scheduler's waiting heap (Python 2 only; cmp and
        __cmp__ do not exist in Python 3).
        """
        return cmp(self.scheduled, other.scheduled)

    def isgeneratorfunction(self, object):
        """Return true if the object is a user-defined generator function.

        Generator function objects provides same attributes as functions.
        (copied from python2.6 inspect module)
        """
        # CO_GENERATOR is the code-object flag set by the compiler on
        # functions that contain a `yield`.
        CO_GENERATOR = 0x20
        return bool((isfunction(object) or ismethod(object)) and
                    object.func_code.co_flags & CO_GENERATOR)

    def run_iteration(self):
        """Execute one step of the task.

        @returns an operation (or iterable of operations) for the
                 scheduler, or TaskFinished when the task is done.
        """
        # A cancelled task finishes immediately without running.
        if self.cancelled:
            return TaskFinished()
        if self._coroutine:
            try:
                if not self._started:
                    self._started = True
                    return self._coroutine.next()
                assert not self._coroutine.gi_running # TODO: remove this
                # TODO: can i just use next?
                return self._coroutine.send(None)
            except StopIteration:
                # generator exhausted => task is done
                return TaskFinished()
        # run is simple function: execute it once and finish
        else:
            self._started = True
            self.run()
            return TaskFinished()
class PeriodicTask(Task):
    """Periodically repeats a task

    * A new instance of the task is created on each interval.
    * The created task gets a reference to this PeriodicTask in its
      ``parent`` attribute.
    * The period is counted from the time the task starts: the interval
      must be larger than the time needed to execute the spawned tasks,
      otherwise they will "accumulate" (although they are never executed
      in parallel).
    """

    def __init__(self, interval, task_class, t_args=None, t_kwargs=None,
                 name=None):
        Task.__init__(self, name=name)
        self.interval = interval
        self.task_class = task_class
        # Fall back to empty containers for falsy arguments (same
        # truthiness rule as an ``or`` default).
        self.args = t_args if t_args else []
        self.kwargs = t_kwargs if t_kwargs else {}

    def run(self):
        """Generator: endlessly spawn a fresh task every ``interval`` seconds."""
        while True:
            deadline = time.time() + self.interval
            # TODO: take last time executed into consideration!
            spawned = self.task_class(*self.args, **self.kwargs)
            spawned.parent = self
            self.scheduled = deadline
            yield (spawned, TaskSleep())
class ProcessTask(Task):
    """A task that executes a shell command"""

    def __init__(self, cmd, timeout=None, lock=None):
        """
        @param cmd (list): list of strings for Popen
        @param timeout (float): time in seconds for terminating the process
        @param lock (str): name of the scheduler lock held while running
        """
        Task.__init__(self, lock=lock)
        self.cmd = cmd
        self.proc = None
        # captured stdout/stderr of the child process
        self.outdata = StringIO.StringIO()
        self.errdata = StringIO.StringIO()
        self.timeout = timeout
        # set by the watchdog when the process had to be terminated
        self._force_killed = False

    def __str__(self):
        return Task.__str__(self) + "(%s)" % " ".join(self.cmd)

    def run(self):
        """Coroutine: spawn the process, wait for it, then collect output."""
        self.proc = subprocess.Popen(self.cmd, stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
        # wait for self.proc to finish
        sched_operations = [TaskPause()]
        timeout_task = None
        if self.timeout:
            now = time.time()
            name = "watch dog for %s" % self.proc.pid
            # positional args map to Task(run_method, name, scheduled)
            timeout_task = Task(self._watchdog, name, now + self.timeout)
            sched_operations.append(timeout_task)
        yield sched_operations
        # cancel timeout task -- the process finished before the deadline
        if timeout_task:
            yield TaskCancel(timeout_task.tid)
        if self._force_killed:
            logging.warn("force killed task:%s" % self)
            # GIVE UP everything... it might hang again if you try to
            # read its output...
            return
        # TODO this is getting tricky... and ugly see:
        # http://groups.google.com/group/comp.lang.python/browse_thread/thread/9e19f3a79449f536/
        # try some kind of polling? or turn-on / turn-off signal handling...
        # stdout: drain in 1024-byte chunks until EOF
        while True:
            try:
                buff = self.proc.stdout.read(1024)
                if not buff:
                    break
                self.outdata.write(buff)
            # in case a signal is received while reading proc.stdout
            except IOError:
                pass
        # stderr: same chunked drain
        while True:
            try:
                buff = self.proc.stderr.read(1024)
                if not buff:
                    break
                self.errdata.write(buff)
            except IOError:
                pass

    def _signal(self, sig_name):
        """Send the named signal to the process, or to its whole process
        group when the child runs in a group of its own.

        @param sig_name (str): attribute name from the signal module,
                               e.g. "SIGTERM"
        """
        this_gpid = os.getpgid(0)
        try:
            target_gpid = os.getpgid(self.proc.pid)
        except OSError:
            # process terminated
            target_gpid = None
        # gone, no need to send signal
        if ((self.proc.returncode is not None) and (this_gpid != target_gpid)):
            return
        logging.info("%s %s" % (sig_name, self.proc.pid))
        try:
            if (target_gpid is None) or (this_gpid == target_gpid):
                # same group as the scheduler: signal only this process,
                # never the group (that would hit the scheduler itself)
                os.kill(self.proc.pid, getattr(signal, sig_name))
            else:
                # child has its own group: signal the whole group
                logging.info("KILL pgid %s " % target_gpid)
                os.killpg(target_gpid, getattr(signal, sig_name))
        except OSError:
            pass # probably process already terminated. ignore.

    def terminate(self):
        """Politely ask the process to stop (SIGTERM)."""
        self._signal("SIGTERM")

    def kill(self):
        """Forcefully stop the process (SIGKILL)."""
        self._signal("SIGKILL")

    def _watchdog(self):
        """kill hanging process

        this method should be used as a target to another Task
        """
        self._force_killed = True
        self.terminate()
        yield TaskSleep(20) # give some time to process terminate
        self.kill()

    def get_returncode(self):
        """check if process terminated its execution and get returncode

        @returns (int) returncode, or None if process is still running
                 or the code was already returned once
        """
        if not self._started:
            return None
        # already returned value before...
        if self.proc and self.proc.returncode is not None:
            return None
        try:
            # non-blocking reap of the child
            pid, status = os.waitpid(self.proc.pid, os.WNOHANG)
            # Python bug #2475
            # you can not get the returncode twice !
            if pid:
                self.proc.returncode = status
                logging.debug("Process pid:%s tid:%s terminated satus:%s" %
                              (pid, self.tid, status))
        except OSError:
            # system call can't find pid. it's gone. ignore it
            pass
        return self.proc.returncode
class PidTask(Task):
    """Scan the scheduler for ProcessTasks whose process has terminated."""

    def __init__(self, sched):
        Task.__init__(self)
        self.sched = sched

    def run(self):
        """Make every finished ProcessTask ready to resume."""
        for task in self.sched.tasks.itervalues():
            # short-circuit: only ProcessTasks are polled for a returncode
            if isinstance(task, ProcessTask) and \
                    task.get_returncode() is not None:
                self.sched.ready_task(task)
class GroupTask(Task):
    """Execute group of tasks in sequence (one at a time)"""

    def __init__(self, task_list):
        Task.__init__(self)
        # stored reversed so pop() yields tasks in their original order
        self.task_list = list(reversed(task_list))

    def run(self):
        """Yield each task paired with a pause on its completion."""
        while self.task_list:
            nxt = self.task_list.pop()
            yield (nxt, TaskPause(nxt.tid))
class Scheduler(object):
    """Cooperative scheduler driving Task objects.

    @ivar tasks (dict): tid -> every task known to the scheduler
    @ivar ready (deque): tasks ready to be executed now
    @ivar waiting (list): heapq of tasks scheduled for the future,
                          ordered by Task.scheduled
    @ivar locks (dict): lock name -> deque of tasks waiting on that lock
    """

    def __init__(self, use_sigchld=True):
        self.tasks = {}
        # TODO use Queue (thread-safe)
        self.ready = deque() # ready to execute tasks
        self.waiting = [] # scheduled to be executed in the future
        self.locks = {}
        if use_sigchld:
            self._register_sigchld()

    def _register_sigchld(self):
        """Install a SIGCHLD handler that schedules a PidTask whenever a
        child process terminates."""
        # create a task to identify terminated process tid
        def handle_child_terminate(signum, frame):
            self.add_task(PidTask(self))
        signal.signal(signal.SIGCHLD, handle_child_terminate)

    def add_task(self, task, delay=0):
        """add task to scheduler (and to ready/scheduled queues)

        delay == 0 => dont modify scheduled time
        delay < 0  => not ready
        delay > 0  => scheduled delay from now
        """
        self.tasks[task.tid] = task
        # set scheduled time
        if delay > 0:
            task.scheduled = time.time() + delay
        elif delay < 0:
            task.scheduled = None
        # ready/schedule/wait: a task with no scheduled time is ready
        # immediately; otherwise it goes on the waiting heap.
        if delay == 0 and (task.scheduled is None):
            self.ready_task(task)
        elif task.scheduled:
            self.sleep_task(task)

    def ready_task(self, task):
        """Append the task to the ready queue (at most once)."""
        ready_tid = [t.tid for t in self.ready]
        if task.tid not in ready_tid:
            self.ready.append(task)
        else:
            logging.warn("Tried to add task (%s) to ready queue twice. (%s)" %
                         (task.tid, ready_tid))

    def sleep_task(self, task):
        """Push the task on the waiting heap (ordered by scheduled time)."""
        # can not be called by a task in ready queue
        ready_tid = [t.tid for t in self.ready]
        assert task.tid not in ready_tid
        heapq.heappush(self.waiting, task)

    def run_task(self, task):
        """Run one iteration of a task and apply the operations it yields."""
        # note task should be pop-out of ready queue before calling this
        if (not task._started) and task.lock:
            # locked can't execute now: queue behind the lock holder
            if task.lock in self.locks:
                self.locks[task.lock].append(task)
                logging.info("%s \t locked" % task)
                return
            # lock other and start
            self.locks[task.lock] = deque()
        logging.info("%s \t running" % task)
        operations = task.run_iteration()
        # make sure return value is iterable
        if not hasattr(operations, '__iter__'):
            operations = (operations,)
        reschedule = True # add task to ready queue again
        for op in operations:
            # got a new task
            if isinstance(op, Task):
                self.add_task(op)
            # task finished remove it from scheduler
            elif isinstance(op, TaskFinished):
                reschedule = False
                if task.lock:
                    # release the lock, waking everyone queued on it
                    lock_list = self.locks[task.lock]
                    while lock_list:
                        self.ready_task(lock_list.popleft())
                    del self.locks[task.lock]
                # wake tasks paused on this one
                for dependent_tid in task.dependents:
                    self.ready_task(self.tasks[dependent_tid])
                del self.tasks[task.tid]
            # sleep
            elif isinstance(op, TaskSleep):
                reschedule = False
                # delay=None keeps the task's own scheduled time
                if op.delay:
                    task.scheduled = time.time() + op.delay
                self.sleep_task(task)
            # pause
            elif isinstance(op, TaskPause):
                reschedule = False
                # tid=None pauses with no dependency: the task will only
                # run again when something calls ready_task on it.
                if op.tid is not None:
                    self.tasks[op.tid].dependents.append(task.tid)
            # cancel
            elif isinstance(op, TaskCancel):
                if op.tid in self.tasks:
                    self.tasks[op.tid].cancelled = True
                #FIXME be more agressive when a task gets cancelled.
            # do nothing
            elif op is not None:
                raise Exception("returned invalid value %s" % op)
        if reschedule:
            self.ready_task(task)

    def loop(self):
        """loop until there are no more active tasks"""
        while self.tasks:
            self.loop_iteration()

    def loop_iteration(self):
        """Run one scheduler step: promote due tasks, run one ready task,
        or sleep until the next scheduled time."""
        now = time.time()
        # add scheduled tasks whose time has come
        while self.waiting and (self.waiting[0].scheduled <= now):
            self.ready_task(heapq.heappop(self.waiting))
        # execute tasks that are ready to be executed
        if self.ready:
            task = self.ready.popleft()
            self.run_task(task)
            return # just sleep if no task was run
        # wait for until next scheduled task is ready
        # TODO pause if (not self.waiting) ?
        interval = (self.waiting[0].scheduled - now) if self.waiting else 60
        logging.debug("*** sleeping %s" % interval)
        logging.info(self.print_state())
        time.sleep(interval)

    def print_state(self):
        """Return a human-readable dump of waiting tasks and held locks."""
        out = "\n/--------------------------------\n"
        if self.waiting:
            out += "WAITING: \n"
            for wait in self.waiting:
                #TODO do not display cancelled tasks
                out += "%s -> %ss\n" %(str(wait), wait.scheduled - time.time())
        if self.locks:
            for lock, values in self.locks.iteritems():
                out += "Lock:%s => %s\n"% (lock, ", ".join(str(t) for t in values))
        out += "/--------------------------------\n\n"
        return out
# TODO
# RPC/webserver
# threaded task
# async db
# pause/resume
| {
"repo_name": "schettino72/serveronduty",
"path": "sodd/scheduler.py",
"copies": "1",
"size": "15473",
"license": "mit",
"hash": 6569798814859897000,
"line_mean": 32.061965812,
"line_max": 96,
"alpha_frac": 0.557681122,
"autogenerated": false,
"ratio": 4.316039051603905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5373720173603904,
"avg_score": null,
"num_lines": null
} |
"""A scheduler that gets payoffs from a local simulation"""
import asyncio
import collections
import contextlib
import json
import logging
import subprocess
from gameanalysis import paygame
from gameanalysis import rsgame
from gameanalysis import utils
from egta import profsched
class _SimulationScheduler(
    profsched._AOpenableScheduler
):  # pylint: disable=too-many-instance-attributes,protected-access
    """Schedule profiles using a command line program

    Parameters
    ----------
    game : RsGame
        A gameanalysis game that indicates how array profiles should be turned
        into json profiles.
    config : {key: value}
        A dictionary mapping string keys to values that will be passed to the
        simulator in the standard simulation spec format.
    command : [str]
        A list of strings that represents a command line program to run. This
        program must accept simulation spec files as flushed lines of input to
        standard in, and write the resulting output as an observation to
        standard out. After all input lines have been read, this must flush the
        output otherwise this could hang waiting for results that are trapped
        in a buffer.
    buff_size : int, optional
        The maximum number of bytes to send to the command at a time. The
        default should be fine for most applications, but if you know your
        machine has a larger or smaller buffer size, setting this accurately
        will prevent unnecessary blocking.
    """

    def __init__(self, game, config, command, buff_size=65536):
        super().__init__(game.role_names, game.strat_names, game.num_role_players)
        # empty payoff game used purely for profile <-> json conversion
        self._game = paygame.game_copy(rsgame.empty_copy(game))
        self._base = {"configuration": config}
        self.command = command
        self.buff_size = buff_size
        self._is_open = False
        self._proc = None
        self._reader = None
        # queue of (line-holder, event) pairs consumed by _read()
        self._read_queue = asyncio.Queue()
        # serializes writes to the subprocess stdin
        self._write_lock = asyncio.Lock()
        # set whenever fewer than buff_size bytes are in flight
        self._buffer_empty = asyncio.Event()
        self._buffer_bytes = 0
        # per-request byte counts, popped as _read() consumes lines
        self._line_bytes = collections.deque()
        self._buffer_empty.set()

    async def sample_payoffs(self, profile):
        """Write one profile to the simulator and await its payoff array."""
        utils.check(self._is_open, "not open")
        # _base is shared mutable state; this is safe only because there is
        # no await between this mutation and the json.dumps below.
        self._base["assignment"] = self._game.profile_to_json(profile)
        bprof = json.dumps(self._base, separators=(",", ":")).encode("utf8")
        size = len(bprof) + 1
        utils.check(
            size < self.buff_size,
            "profile could not be written to buffer without blocking",
        )
        async with self._write_lock:
            # account for our bytes; block while the pipe is saturated
            self._buffer_bytes += size
            self._line_bytes.appendleft(size)
            if self._buffer_bytes >= self.buff_size:
                self._buffer_empty.clear()
            await self._buffer_empty.wait()
            # register a slot for the reader task to fill
            got_data = asyncio.Event()
            line = [None]
            self._read_queue.put_nowait((line, got_data))
            self._proc.stdin.write(bprof)
            self._proc.stdin.write(b"\n")
            try:
                await self._proc.stdin.drain()
            except ConnectionError:  # pragma: no cover race condition
                raise RuntimeError("process died unexpectedly")
            logging.debug("scheduled profile: %s", self._game.profile_to_repr(profile))
        # wait (outside the lock, so requests pipeline) for our line
        await got_data.wait()
        # surface any failure from the background reader
        if self._reader.done() and self._reader.exception() is not None:
            raise self._reader.exception()
        jpays = json.loads(line[0].decode("utf8"))
        payoffs = self._game.payoff_from_json(jpays)
        payoffs.setflags(write=False)
        # NOTE(review): uses self.profile_to_repr here but
        # self._game.profile_to_repr above -- presumably equivalent via the
        # parent scheduler class; confirm.
        logging.debug("read payoff for profile: %s", self.profile_to_repr(profile))
        return payoffs

    async def _read(self):
        """Read line loop"""
        while True:
            line, got_data = await self._read_queue.get()
            try:
                line[0] = await self._proc.stdout.readline()
                if not line[0]:
                    raise RuntimeError("process died unexpectedly")
                # a full line was consumed; release its share of the buffer
                self._buffer_bytes -= self._line_bytes.pop()
                if self._buffer_bytes < self.buff_size:  # pragma: no branch
                    self._buffer_empty.set()
            finally:
                # always wake the waiter, even when the read failed
                got_data.set()

    async def aopen(self):
        """Open the simsched"""
        utils.check(not self._is_open, "can't open twice")
        utils.check(self._proc is None, "proce must be None")
        utils.check(self._reader is None, "stream must be None")
        try:
            self._proc = await asyncio.create_subprocess_exec(
                *self.command, stdout=subprocess.PIPE, stdin=subprocess.PIPE
            )
            # background task that fulfills read requests in FIFO order
            self._reader = asyncio.ensure_future(self._read())
            self._is_open = True
        except Exception as ex:
            # XXX This line exists to fool duplication check
            await self.aclose()
            raise ex
        return self

    async def aclose(self):
        """Close the simsched"""
        self._is_open = False
        if self._reader is not None:
            self._reader.cancel()
            with contextlib.suppress(Exception, asyncio.CancelledError):
                await self._reader
            self._reader = None
        if self._proc is not None:
            # escalate: terminate, wait briefly, then kill and wait again
            with contextlib.suppress(ProcessLookupError):
                self._proc.terminate()
            with contextlib.suppress(asyncio.TimeoutError):
                await asyncio.wait_for(self._proc.wait(), 0.25)
            with contextlib.suppress(ProcessLookupError):
                self._proc.kill()
            with contextlib.suppress(asyncio.TimeoutError):
                await asyncio.wait_for(self._proc.wait(), 0.25)
            self._proc = None
        # drop any unanswered read requests and reset buffer accounting
        while not self._read_queue.empty():
            self._read_queue.get_nowait()
        self._buffer_empty.set()
        self._buffer_bytes = 0
        self._line_bytes.clear()

    def __str__(self):
        return " ".join(self.command)
def simsched(game, config, command, buff_size=65536):
    """Create a new simsched.

    Thin factory over the private ``_SimulationScheduler``; see its class
    docstring for parameter semantics.
    """
    scheduler = _SimulationScheduler(game, config, command, buff_size=buff_size)
    return scheduler
| {
"repo_name": "egtaonline/quiesce",
"path": "egta/simsched.py",
"copies": "1",
"size": "6177",
"license": "apache-2.0",
"hash": -5309441196663598000,
"line_mean": 36.8957055215,
"line_max": 83,
"alpha_frac": 0.6036911122,
"autogenerated": false,
"ratio": 4.262939958592132,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021998599129207448,
"num_lines": 163
} |
"""A scheduler that gets payoffs from a local simulation"""
import asyncio
import itertools
import json
import logging
import os
import shutil
import tempfile
import zipfile
from gameanalysis import paygame
from gameanalysis import rsgame
from gameanalysis import utils
from egta import profsched
class _ZipScheduler(
    profsched._OpenableScheduler
):  # pylint: disable=too-many-instance-attributes,protected-access
    """Schedule profiles using an EGTA Online zip file

    Parameters
    ----------
    game : RsGame
        A gameanalysis game that indicates how array profiles should be turned
        into json profiles.
    conf : {key: value}
        A dictionary mapping string keys to values that will be passed to the
        simulator in the standard simulation spec format.
    zipf : string, file-like
        A zip file that follows the same semantics that EGTA Online expects.
    max_procs : int, optional
        The maximum number of processes to spawn for simulations.
    simultaneous_obs : int, optional
        Number of observations gathered per simulator invocation; concurrent
        requests for the same profile share one invocation.
    """

    def __init__(self, game, conf, zipf, *, max_procs=4, simultaneous_obs=1):
        super().__init__(game.role_names, game.strat_names, game.num_role_players)
        # empty payoff game used purely for profile <-> json conversion
        self._game = paygame.game_copy(rsgame.empty_copy(game))
        self.conf = conf
        self.zipf = zipf
        # profile-hash -> (counter, queue) for requests sharing a batch run
        self._extra_profs = {}
        self._base = {}
        self._count = simultaneous_obs
        self._is_open = False
        self._sim_dir = None
        self._prof_dir = None
        self._sim_root = None
        # monotonically increasing per-run directory index
        self._num = 0
        # bounds the number of concurrently running simulator processes
        self._procs = asyncio.Semaphore(max_procs)

    async def sample_payoffs(self, profile):
        """Run (or join a pending run of) the simulator for one profile and
        return a single payoff array."""
        utils.check(self._is_open, "must enter scheduler")
        hprof = utils.hash_array(profile)
        counter, queue = self._extra_profs.get(hprof, (None, None))
        if counter is not None:
            # Already scheduling some profiles: claim one of the pending
            # observations instead of launching a new run.
            if next(counter) >= self._count:
                self._extra_profs.pop(hprof)
            pay = await queue.get()
            logging.debug("read payoff for profile: %s", self.profile_to_repr(profile))
            return pay
        else:
            # Need to schedule new profiles
            direc = os.path.join(self._prof_dir.name, str(self._num))
            self._num += 1
            queue = asyncio.Queue()
            if self._count > 1:
                # later requests for the same profile share this queue
                self._extra_profs[hprof] = (itertools.count(2), queue)
            os.makedirs(direc)
            self._base["assignment"] = self._game.profile_to_assignment(profile)
            with open(os.path.join(direc, "simulation_spec.json"), "w") as fil:
                json.dump(self._base, fil)
            logging.debug(
                "scheduled %d profile%s: %s",
                self._count,
                "" if self._count == 1 else "s",
                self.profile_to_repr(profile),
            )
            # Limit simultaneous processes
            async with self._procs:
                proc = await asyncio.create_subprocess_exec(
                    os.path.join("script", "batch"),
                    direc,
                    str(self._count),
                    cwd=self._sim_root,
                    stderr=asyncio.subprocess.PIPE,
                    stdout=asyncio.subprocess.DEVNULL,
                )
                _, err = await proc.communicate()
            utils.check(
                proc.returncode == 0,
                "process failed with returncode {:d} and stderr {}",
                proc.returncode,
                err,
            )
            # exactly self._count observation files must have been written
            obs_files = (
                f
                for f in os.listdir(direc)
                if "observation" in f and f.endswith(".json")
            )
            for _ in range(self._count):
                obs_file = next(obs_files, None)
                utils.check(
                    obs_file is not None,
                    "simulation didn't write enough observation files",
                )
                with open(os.path.join(direc, obs_file)) as fil:
                    pay = self._game.payoff_from_json(json.load(fil))
                pay.setflags(write=False)
                queue.put_nowait(pay)
            obs_file = next(obs_files, None)
            utils.check(obs_file is None, "simulation wrote too many observation files")
            shutil.rmtree(direc)
            # this coroutine takes the first observation for itself
            pay = queue.get_nowait()
            logging.debug("read payoff for profile: %s", self.profile_to_repr(profile))
            return pay

    def open(self):
        """Open the zip scheduler"""
        utils.check(not self._is_open, "can't be open")
        try:
            self._num = 0
            self._sim_dir = tempfile.TemporaryDirectory()
            self._prof_dir = tempfile.TemporaryDirectory()
            with zipfile.ZipFile(self.zipf) as zfil:
                zfil.extractall(self._sim_dir.name)
            # macOS zips ship a junk __MACOSX directory; ignore it
            sim_files = [
                d for d in os.listdir(self._sim_dir.name) if d not in {"__MACOSX"}
            ]
            utils.check(
                len(sim_files) == 1,
                "improper zip format, only one file should exist in root",
            )
            self._sim_root = os.path.join(self._sim_dir.name, sim_files[0])
            # the batch script must be executable for create_subprocess_exec
            os.chmod(os.path.join(self._sim_root, "script", "batch"), 0o700)
            with open(os.path.join(self._sim_root, "defaults.json")) as fil:
                self._base["configuration"] = json.load(fil).get("configuration", {})
            self._base["configuration"].update(self.conf)
            self._is_open = True
        except Exception as ex:
            self.close()
            raise ex

    def close(self):
        """Close the zip scheduler"""
        self._is_open = False
        # BUGFIX: guard against a partially-failed open() -- the temporary
        # directories may never have been created, and calling cleanup() on
        # None raised an AttributeError that masked the original error.
        # Resetting to None also makes close() idempotent.
        if self._sim_dir is not None:
            self._sim_dir.cleanup()
            self._sim_dir = None
        if self._prof_dir is not None:
            self._prof_dir.cleanup()
            self._prof_dir = None

    def __str__(self):
        return self.zipf
def zipsched(game, conf, zipf, *, max_procs=4, simultaneous_obs=1):
    """Create a zip scheduler.

    Thin factory over the private ``_ZipScheduler``; see its class docstring
    for parameter semantics.
    """
    scheduler = _ZipScheduler(
        game, conf, zipf, max_procs=max_procs, simultaneous_obs=simultaneous_obs
    )
    return scheduler
| {
"repo_name": "egtaonline/quiesce",
"path": "egta/zipsched.py",
"copies": "1",
"size": "6025",
"license": "apache-2.0",
"hash": -7446523688862852000,
"line_mean": 35.5151515152,
"line_max": 88,
"alpha_frac": 0.550373444,
"autogenerated": false,
"ratio": 4.126712328767123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5177085772767123,
"avg_score": null,
"num_lines": null
} |
"""A scheme for assigning categories to morphs.
To change the number or meaning of categories,
only this file should need to be modified.
"""
from __future__ import unicode_literals
import collections
import locale
import logging
import math
import sys
from . import utils
# True when running under Python 3 (this module supports both 2 and 3).
PY3 = sys.version_info.major == 3

# _str is used to convert command line arguments to the right type
# (str for PY3, unicode for PY2, decoding with the locale's encoding)
if PY3:
    _str = str
else:
    _str = lambda x: unicode(x, encoding=locale.getpreferredencoding())

# module-level logger, named after this module
_logger = logging.getLogger(__name__)
class WordBoundary(object):
    """A special symbol for marking word boundaries.

    Using an object of this type allows arbitrary characters in the corpus,
    while using a string e.g. '#' instead causes that char to be reserved.
    """

    def __repr__(self):
        return '#'

    def __len__(self):
        # a boundary contributes no characters
        return 0

    def __eq__(self, other):
        # Any two word boundaries compare equal, even across runs.
        return isinstance(other, WordBoundary)

    def __hash__(self):
        # Called very frequently; a fixed constant is a minor optimization
        # over hashing the class name each time.
        #return hash(self.__class__.__name__)
        return 8364886854198508766
# Using a string is slightly faster.
# Change to WordBoundary if you want to e.g. support '#':s in the corpus
WORD_BOUNDARY = '#' # WordBoundary()

##################################
### Categorization-dependent code:
### to change the categories, only code in this module
### should need to be changed.

# A data structure with one value for each category.
# This also defines the set of possible categories
# (PRE = prefix, STM = stem, SUF = suffix, ZZZ = non-morpheme).
ByCategory = collections.namedtuple('ByCategory',
                                    ['PRE', 'STM', 'SUF', 'ZZZ'])

# Fallback category assigned when nothing better is known.
DEFAULT_CATEGORY = 'STM'

# The morph usage/context features used to calculate the probability of a
# morph belonging to a category.
MorphContext = collections.namedtuple('MorphContext',
                                      ['count', 'left_perplexity',
                                       'right_perplexity'])

# One alternative analysis (segmentation) together with its penalty.
AnalysisAlternative = collections.namedtuple('AnalysisAlternative',
                                             ['analysis', 'penalty'])

# Context type flags, from which the context type is formed.
# (Binary flags in integer format)
CONTEXT_FLAG_INITIAL = 1
CONTEXT_FLAG_FINAL = 2

# The context type values: combinations of the flags above.
CONTEXT_TYPE_INTERNAL = 0
CONTEXT_TYPE_INITIAL = CONTEXT_TYPE_INTERNAL + CONTEXT_FLAG_INITIAL
CONTEXT_TYPE_FINAL = CONTEXT_TYPE_INTERNAL + CONTEXT_FLAG_FINAL
CONTEXT_TYPE_BOTH = (CONTEXT_TYPE_INTERNAL + CONTEXT_FLAG_INITIAL +
                     CONTEXT_FLAG_FINAL)

# Penalty for each non-morpheme, in heuristic postprocessing
# Must be smaller than LOGPROB_ZERO, to prevent impossible taggings from
# being generated.
NON_MORPHEME_PENALTY = 50
class Postprocessor(object):
    """Abstract base class for heuristic output postprocessors."""
    # BUGFIX: this string was previously placed *after* __init__, where it
    # was a no-op class-level expression statement instead of the class
    # docstring; moved to the proper position.

    def __init__(self):
        # morphs created by joins, owned by this postprocessor
        self.temporaries = set()

    def _join_at(self, analysis, i):
        """Helper function for joins: merge the morphs at positions i and
        i + 1 into one CategorizedMorph, choosing a sensible tag."""
        # prefer the right-hand tag unless it is a non-morpheme (ZZZ)
        tag = analysis[i].category
        if analysis[i + 1].category != 'ZZZ':
            tag = analysis[i + 1].category
        # if both sides are non-morphemes, fall back to stem
        if tag == 'ZZZ':
            tag = 'STM'
        morph = analysis[i].morph + analysis[i + 1].morph
        cmorph = CategorizedMorph(morph, tag)
        self.temporaries.add(cmorph)
        out = list(analysis[:i]) + [cmorph]
        if len(analysis) > (i + 2):
            out.extend(analysis[(i + 2):])
        return out

    def __eq__(self, other):
        # postprocessors are stateless w.r.t. equality: same class == equal
        return type(self) == type(other)
# FIXME: badly named, should be NonmorphemeRemovalPostprocessor
class HeuristicPostprocessor(Postprocessor):
    """Heuristic post-processing to remove non-morphemes from the
    final segmentation. Unlike in Morfessor Cat-ML,
    this is not necessary during training for controlling model
    complexity, but only as a post-processing step to ensure
    meaningful categories.
    """

    def __init__(self, max_join_stem_len=4):
        super(HeuristicPostprocessor, self).__init__()
        # NOTE(review): this attribute is stored but apply_to passes a
        # literal 4 to _long_to_stem; presumably that literal should be
        # self.max_join_stem_len -- confirm before changing.
        self.max_join_stem_len = max_join_stem_len

    def apply_to(self, analysis, model):
        """Remove nonmorphemes from the analysis by joining or retagging
        morphs, using heuristics."""
        # Nothing to do if there are no nonmorphemes
        if all([m.category != 'ZZZ' for m in analysis]):
            return analysis
        # A one-morph analysis can only be fixed by retagging it as a stem.
        if len(analysis) == 1:
            return (CategorizedMorph(analysis[0].morph, 'STM'),)
        # Sequences of ZZZs should be joined
        analysis = self._join_sequences(analysis, model.forcesplit)
        # Resulting long ZZZs are retagged as stems
        self._long_to_stem(analysis, 4)
        # Might be done at this point
        if all(m.category != 'ZZZ' for m in analysis):
            return analysis
        # Retag parts of a multiple-suffix tail as SUF
        self._tail_suffixes(analysis)
        # If not: stronger measures are needed
        # Force join remaining
        analysis = self._force_join(analysis, model.forcesplit)
        # Retag with non-morphemes forbidden
        analysis = model.viterbi_tag(analysis, forbid_zzz=True)
        return analysis

    def _join_sequences(self, analysis, forcesplit):
        """Joins consecutive non-morphemes"""
        prev = None
        out = []
        for m in analysis:
            # Join only when both prev and m are ZZZ and neither is a
            # forced split point; otherwise emit prev unchanged.
            if (prev is None or
                    (m.category != 'ZZZ' or m.morph in forcesplit) or
                    (prev.morph in forcesplit) or
                    (prev.category != 'ZZZ')):
                if prev is not None:
                    out.append(prev)
                prev = m
                continue
            # prev is also a non-morpheme, and eligible for joining
            prev = CategorizedMorph(prev.morph + m.morph, 'ZZZ')
        if prev is not None:
            out.append(prev)
        return out

    def _long_to_stem(self, analysis, min_len):
        """Converts long non-morphemes into stems. In-place operation."""
        for m in analysis:
            if m.category == 'ZZZ' and len(m.morph) >= min_len:
                m.category = 'STM'

    def _tail_suffixes(self, analysis):
        """Converts trailing non-morphemes into suffixes.
        In-place operation.
        """
        for (i, m) in enumerate(analysis):
            if i == 0:
                continue
            # Only retag a ZZZ that directly follows a suffix and is
            # followed exclusively by suffixes/non-morphemes.
            if m.category == 'ZZZ' and analysis[i - 1].category == 'SUF':
                if all(tail.category in ('SUF', 'ZZZ')
                       for tail in analysis[(i + 1):]):
                    m.category = 'SUF'

    def _force_join(self, analysis, forcesplit):
        """Joins non-morphemes with previous or next morph"""
        prev = None
        out = []
        if len(analysis) < 2:
            return analysis
        # A leading non-morpheme can only be joined rightwards.
        if (analysis[0].category == 'ZZZ' and
                analysis[0].morph not in forcesplit and
                analysis[1].morph not in forcesplit):
            analysis = self._join_at(analysis, 0)
        for m in analysis:
            if prev is None:
                prev = m
                continue
            # Join m into prev unless m is a proper morpheme or either
            # morph is a forced split point.
            if ((m.category != 'ZZZ' or m.morph in forcesplit) or
                    (prev.morph in forcesplit)):
                if prev is not None:
                    out.append(prev)
                prev = m
                continue
            # prev is eligible for joining
            prev = CategorizedMorph(prev.morph + m.morph, 'ZZZ')
        if prev is not None:
            out.append(prev)
        return out
class CompoundSegmentationPostprocessor(Postprocessor):
    """Postprocessor that makes FlatCat perform compound segmentation."""

    def __init__(self, long_to_stems=True, min_stem_len=5):
        """Arguments:
            long_to_stems : if True, retag sufficiently long morphs as
                            stems before splitting.
            min_stem_len : minimum morph length for the retagging above.
                           Default 5 preserves the previously hard-coded
                           behavior.
        """
        # FIX: base-class __init__ (which creates self.temporaries) was
        # previously not called.
        super(CompoundSegmentationPostprocessor, self).__init__()
        self._long_to_stems = long_to_stems
        self._min_stem_len = min_stem_len

    def apply_to(self, analysis, model=None):
        """Collapse the analysis into one stem per compound part."""
        if self._long_to_stems:
            analysis = list(self.long_to_stems(analysis))
        parts = self.split_compound(analysis)
        out = []
        for part in parts:
            part = [morph.morph for morph in part]
            part = ''.join(part)
            out.append(CategorizedMorph(part, 'STM'))
        return out

    def long_to_stems(self, analysis):
        """Retag morphs of length >= min_stem_len as stems (generator)."""
        for morph in analysis:
            if morph.category == 'STM':
                # avoids unnecessary NOOP re-wrapping
                yield morph
            elif len(morph) >= self._min_stem_len:
                yield CategorizedMorph(morph.morph, 'STM')
            else:
                yield morph

    def split_compound(self, analysis):
        """Group morphs into compound parts, starting a new part at each
        prefix/stem that does not directly follow a prefix."""
        out = []
        current = []
        prev = None
        for morph in analysis:
            if prev is not None and prev != 'PRE':
                if morph.category in ('PRE', 'STM'):
                    out.append(current)
                    current = []
            current.append(morph)
            prev = morph.category
        out.append(current)
        return out
class MorphContextBuilder(object):
    """Temporary structure used when calculating the MorphContexts."""

    def __init__(self):
        self.count = 0
        self.left = collections.Counter()
        self.right = collections.Counter()

    @property
    def left_perplexity(self):
        """Perplexity of the left-neighbour distribution."""
        return MorphContextBuilder._perplexity(self.left)

    @property
    def right_perplexity(self):
        """Perplexity of the right-neighbour distribution."""
        return MorphContextBuilder._perplexity(self.right)

    @staticmethod
    def _perplexity(contexts):
        """Perplexity (exp of entropy) of a context-count distribution.

        An integer argument is treated as a uniform distribution over
        that many distinct contexts, whose perplexity is the integer
        itself.
        """
        if isinstance(contexts, int):
            counts = {index: 1. for index in range(contexts)}
        else:
            counts = contexts
        total = float(sum(counts.values()))
        entropy = 0.0
        for key in counts:
            share = float(counts[key]) / total
            entropy -= share * math.log(share)
        return math.exp(entropy)
class MorphUsageProperties(object):
    """This class describes how the prior probabilities are calculated
    from the usage of morphs.
    """

    # These transitions are impossible
    zero_transitions = ((WORD_BOUNDARY, WORD_BOUNDARY),
                        ('PRE', WORD_BOUNDARY),
                        ('PRE', 'SUF'),
                        (WORD_BOUNDARY, 'SUF'))
    # Adding these transitions removes the use of non-morphemes
    forbid_zzz = ((WORD_BOUNDARY, 'ZZZ'),
                  ('PRE', 'ZZZ'),
                  ('STM', 'ZZZ'),
                  ('SUF', 'ZZZ'))

    # Cache for memoized valid transitions
    _valid_transitions = None

    def __init__(self, ppl_threshold=100, ppl_slope=None, length_threshold=3,
                 length_slope=2, type_perplexity=False,
                 min_perplexity_length=4, pre_ppl_threshold=None,
                 uncapped_ppl=False):
        """Initialize the model parameters describing morph usage.

        Arguments:
            ppl_threshold : threshold value for sigmoid used to calculate
                            probabilities from left and right perplexities.
            ppl_slope : Slope value for sigmoid used to calculate
                        probabilities from left and right perplexities.
            length_threshold : threshold value for sigmoid used to calculate
                               probabilities from length of morph.
            length_slope : Slope value for sigmoid used to calculate
                           probabilities from length of morph.
            type_perplexity : If true, perplexity is based on word types,
                              If false, perplexity is based on word tokens.
            min_perplexity_length : Morphs shorter than this length are
                                    ignored when calculating perplexity.
            pre_ppl_threshold : Separate ppl threshold for prefixes.
            uncapped_ppl : If true, estimated perplexities for unseen
                           morphs are not capped (see estimate_contexts).
        """
        if ppl_threshold is None:
            self._ppl_threshold = None
        else:
            self._ppl_threshold = float(ppl_threshold)
        if pre_ppl_threshold is None:
            # Fall back to the generic threshold for prefixes.
            self._pre_ppl_threshold = self._ppl_threshold
        else:
            self._pre_ppl_threshold = float(pre_ppl_threshold)
        self._length_threshold = float(length_threshold)
        self._length_slope = float(length_slope)
        self.type_perplexity = bool(type_perplexity)
        self._min_perplexity_length = int(min_perplexity_length)
        if ppl_slope is not None:
            self._ppl_slope = float(ppl_slope)
            self._pre_ppl_slope = self._ppl_slope
        elif self._ppl_threshold is None:
            self._ppl_slope = None
            self._pre_ppl_slope = self._ppl_slope
        else:
            # Default slope scales inversely with the threshold.
            self._ppl_slope = 10.0 / self._ppl_threshold
            self._pre_ppl_slope = 10.0 / self._pre_ppl_threshold
        self._uncapped_ppl = uncapped_ppl

        # Counts of different contexts in which a morph occurs
        self._contexts = utils.Sparse(default=MorphContext(0, 1.0, 1.0))
        self._context_builders = collections.defaultdict(MorphContextBuilder)
        self._contexts_per_iter = 50000  # FIXME customizable

        # Cache for memoized feature-based conditional class probabilities
        self._condprob_cache = collections.defaultdict(float)
        self._marginalizer = None
        self._zlctc = None

    def get_params(self):
        """Returns a dict of hyperparameters."""
        params = {
            'perplexity-threshold': self._ppl_threshold,
            'pre-perplexity-threshold': self._pre_ppl_threshold,
            'perplexity-slope': self._ppl_slope,
            'pre-perplexity-slope': self._pre_ppl_slope,
            'length-threshold': self._length_threshold,
            'length-slope': self._length_slope,
            'type-perplexity': self.type_perplexity,
            'min-perplexity-length': self._min_perplexity_length}
        return params

    def set_params(self, params):
        """Sets hyperparameters to loaded values."""
        params = {key: val for (key, val) in params.items()
                  if val is not None}
        if 'perplexity-threshold' in params:
            _logger.info('Setting perplexity-threshold to {}'.format(
                params['perplexity-threshold']))
            self._ppl_threshold = (float(params['perplexity-threshold']))
        if 'pre-perplexity-threshold' in params:
            _logger.info('Setting pre-perplexity-threshold to {}'.format(
                params['pre-perplexity-threshold']))
            self._pre_ppl_threshold = (float(
                params['pre-perplexity-threshold']))
        if 'perplexity-slope' in params:
            _logger.info('Setting perplexity-slope to {}'.format(
                params['perplexity-slope']))
            self._ppl_slope = (float(params['perplexity-slope']))
        if 'pre-perplexity-slope' in params:
            # FIX: previously logged params['perplexity-slope'], which both
            # printed the wrong value and raised KeyError when only the
            # pre-slope was supplied.
            _logger.info('Setting pre-perplexity-slope to {}'.format(
                params['pre-perplexity-slope']))
            self._pre_ppl_slope = (float(params['pre-perplexity-slope']))
        if 'length-threshold' in params:
            _logger.info('Setting length-threshold to {}'.format(
                params['length-threshold']))
            self._length_threshold = (float(params['length-threshold']))
        if 'length-slope' in params:
            _logger.info('Setting length-slope to {}'.format(
                params['length-slope']))
            self._length_slope = (float(params['length-slope']))
        if 'type-perplexity' in params:
            _logger.info('Setting type-perplexity to {}'.format(
                params['type-perplexity']))
            self.type_perplexity = bool(params['type-perplexity'])
        if 'min-perplexity-length' in params:
            _logger.info('Setting min-perplexity-length to {}'.format(
                params['min-perplexity-length']))
            # FIX: stored as int for consistency with __init__.
            self._min_perplexity_length = (int(float(
                params['min-perplexity-length'])))

    def calculate_usage_features(self, seg_func):
        """Calculate the usage features of morphs in the corpus."""
        self.clear()
        msg = 'Must set perplexity threshold'
        assert self._ppl_threshold is not None, msg
        if self._pre_ppl_threshold is None:
            self._pre_ppl_threshold = self._ppl_threshold

        while True:
            # If risk of running out of memory, perform calculations in
            # multiple loops over the data
            conserving_memory = False
            for rcount, segments in seg_func():
                if not self.type_perplexity:
                    pcount = rcount
                else:
                    # pcount used for perplexity, rcount is real count
                    pcount = 1
                for (i, morph) in enumerate(segments):
                    # Collect information about the contexts in which
                    # the morphs occur.
                    if self._add_to_context(morph, pcount, rcount,
                                            i, segments):
                        conserving_memory = True
            self._compress_contexts()
            if not conserving_memory:
                break

    def clear(self):
        """Resets the context variables.
        Use before fully reprocessing a segmented corpus."""
        self._contexts.clear()
        self._context_builders.clear()
        self._condprob_cache.clear()
        self._marginalizer = None
        self._zlctc = None

    def _add_to_context(self, morph, pcount, rcount, i, segments):
        """Collect information about the contexts in which the morph occurs"""
        if morph in self._contexts:
            return False
        # Cap the number of morphs processed per iteration to limit memory
        # use; returning True signals that another pass is needed.
        if (len(self._context_builders) > self._contexts_per_iter and
                morph not in self._context_builders):
            return True

        # Previous morph.
        if i == 0:
            # Word boundaries are counted as separate contexts
            neighbour = WORD_BOUNDARY
        else:
            neighbour = segments[i - 1]
            # Contexts shorter than threshold don't affect perplexity
            if len(neighbour) < self._min_perplexity_length:
                neighbour = None
        if neighbour is not None:
            self._context_builders[morph].left[neighbour] += pcount

        # Next morph.
        if i == len(segments) - 1:
            neighbour = WORD_BOUNDARY
        else:
            neighbour = segments[i + 1]
            if len(neighbour) < self._min_perplexity_length:
                neighbour = None
        if neighbour is not None:
            self._context_builders[morph].right[neighbour] += pcount

        self._context_builders[morph].count += rcount
        return False

    def _compress_contexts(self):
        """Calculate compact features from the context data collected into
        _context_builders. This is done to save memory."""
        for morph in self._context_builders:
            tmp = self._context_builders[morph]
            self._contexts[morph] = MorphContext(tmp.count,
                                                 tmp.left_perplexity,
                                                 tmp.right_perplexity)
        self._context_builders.clear()

    def condprobs(self, morph):
        """Calculate feature-based conditional probabilities P(Category|Morph)
        from the contexts in which the morphs occur.

        Arguments:
            morph : A string representation of the morph type.
        """
        if morph not in self._condprob_cache:
            context = self._contexts[morph]

            prelike = sigmoid(context.right_perplexity,
                              self._pre_ppl_threshold,
                              self._pre_ppl_slope)
            suflike = sigmoid(context.left_perplexity,
                              self._ppl_threshold,
                              self._ppl_slope)
            stmlike = sigmoid(len(morph),
                              self._length_threshold,
                              self._length_slope)

            p_nonmorpheme = (1. - prelike) * (1. - suflike) * (1. - stmlike)
            # assert 0 <= p_nonmorpheme <= 1

            if p_nonmorpheme == 1:
                p_pre = 0.0
                p_suf = 0.0
                p_stm = 0.0
            else:
                # Floor the non-morpheme probability to keep the
                # normalization numerically stable.
                if p_nonmorpheme < 0.001:
                    p_nonmorpheme = 0.001

                normcoeff = ((1.0 - p_nonmorpheme) /
                             ((prelike ** 2) + (suflike ** 2) +
                              (stmlike ** 2)))
                p_pre = (prelike ** 2) * normcoeff
                p_suf = (suflike ** 2) * normcoeff
                p_stm = 1.0 - p_pre - p_suf - p_nonmorpheme

            self._condprob_cache[morph] = ByCategory(p_pre, p_stm, p_suf,
                                                     p_nonmorpheme)
        return self._condprob_cache[morph]

    @property
    def marginal_class_probs(self):
        """True distribution of class probabilities,
        calculated by marginalizing over the feature based conditional
        probabilities over all observed morphs.
        This will not give the same result as the observed count based
        calculation.
        """
        return self._get_marginalizer().normalized()

    @property
    def category_token_count(self):
        """Un-normalized distribution of class probabilities,
        the sum of which is the number of observed morphs.
        See marginal_class_probs for the normalized version.
        """
        return self._get_marginalizer().category_token_count

    def zlog_category_token_count(self):
        # Memoized; invalidated whenever the marginalizer changes.
        if self._zlctc is None:
            self._zlctc = ByCategory(
                *[utils.zlog(x) for x in self.category_token_count])
        return self._zlctc

    def _get_marginalizer(self):
        if self._marginalizer is None:
            self._marginalizer = Marginalizer()
            for morph in self.seen_morphs():
                self._marginalizer.add(self.count(morph),
                                       self.condprobs(morph))
            self._zlctc = None
        return self._marginalizer

    def feature_cost(self, morph):
        """The cost of encoding the necessary features along with a morph.

        The length in characters of the morph is also a feature, but it does
        not need to be encoded as it is available from the surface form.
        """
        context = self._contexts[morph]
        return (universalprior(context.right_perplexity) +
                universalprior(context.left_perplexity))

    def estimate_contexts(self, old_morphs, new_morphs, max_contexts=None):
        """Estimates context features for new unseen morphs.

        Arguments:
            old_morphs : A sequence of morphs being replaced. The existing
                         context of these morphs can be used in the
                         estimation.
            new_morphs : A sequence of morphs that replaces the old ones.
                         Any previously unseen morphs in this sequence
                         will get context features estimated from their
                         surface form and/or from the contexts of the
                         old morphs they replace.
            max_contexts : optional cap (context count) for the inherited
                           perplexities; ignored when uncapped_ppl is set.
        Returns:
            A list of temporary morph contexts that have been estimated.
            These should be removed by the caller if no longer necessary.
            The removal is done using MorphContext.remove_temporaries.
        """
        try:
            uncapped_ppl = self._uncapped_ppl
        except AttributeError:
            # Models unpickled from before this attribute existed
            # behave as uncapped.
            uncapped_ppl = True
        # FIX: max_ppl was previously referenced while unbound when
        # capping was requested but max_contexts was None.
        max_ppl = None
        if not uncapped_ppl and max_contexts is not None:
            max_ppl = MorphContextBuilder._perplexity(max_contexts)
        temporaries = []
        for (i, morph) in enumerate(new_morphs):
            if morph in self:
                # The morph already has real context: no need to estimate
                continue
            if i == 0:
                # Prefix inherits left perplexity of leftmost parent
                l_ppl = self._contexts[old_morphs[0]].left_perplexity
                if max_ppl is not None:
                    l_ppl = min(l_ppl, max_ppl)
            else:
                # Otherwise assume that the morph doesn't appear in any
                # other contexts, which gives perplexity 1.0
                l_ppl = 1.0
            if i == len(new_morphs) - 1:
                r_ppl = self._contexts[old_morphs[-1]].right_perplexity
                if max_ppl is not None:
                    r_ppl = min(r_ppl, max_ppl)
            else:
                r_ppl = 1.0
            count = 0  # estimating does not add instances of the morph
            self._contexts[morph] = MorphContext(count, l_ppl, r_ppl)
            temporaries.append(morph)
        return temporaries

    @staticmethod
    def context_type(prev_morph, next_morph, prev_cat, next_cat):
        """Cluster certain types of context, to allow making context-dependant
        joining decisions."""
        # This categorization scheme ignores prev_morph, next_morph,
        # and only uses the categories
        ctype = CONTEXT_TYPE_INTERNAL
        if prev_cat == WORD_BOUNDARY or prev_cat == 'PRE':
            ctype += CONTEXT_FLAG_INITIAL
        if next_cat == WORD_BOUNDARY or next_cat == 'SUF':
            ctype += CONTEXT_FLAG_FINAL
        return ctype

    ### End of categorization-dependent code
    ########################################
    # But not the end of the class:
    # The methods in this class below this line are helpers that will
    # probably not need to be modified if the categorization scheme changes
    #

    def remove_temporaries(self, temporaries):
        """Remove estimated temporary morph contexts when no longer needed."""
        for morph in temporaries:
            if morph not in self:
                continue
            msg = '{}: {}'.format(morph, self._contexts[morph].count)
            assert self._contexts[morph].count == 0, msg
            del self._contexts[morph]
            if morph in self._condprob_cache:
                del self._condprob_cache[morph]

    def remove_zeros(self):
        """Remove context information for all morphs contexts with zero
        count. This can save a bit more memory than just removing estimated
        temporary contexts. Estimated context will be used for the removed
        morphs for the rest of the iteration."""
        remove_list = []
        for morph in self._contexts.keys():
            if self._contexts[morph].count == 0:
                remove_list.append(morph)
        for morph in remove_list:
            del self._contexts[morph]
            if morph in self._condprob_cache:
                del self._condprob_cache[morph]

    def seen_morphs(self):
        """All morphs that have defined contexts."""
        return [morph for morph in self._contexts.keys()
                if self._contexts[morph].count > 0]

    def __contains__(self, morph):
        return morph in self._contexts and self._contexts[morph].count > 0

    def get_context_features(self, morph):
        """Returns the context features of a seen morph."""
        return self._contexts[morph]

    def count(self, morph):
        """The counts in the corpus of morphs with contexts."""
        if morph not in self._contexts:
            return 0
        return self._contexts[morph].count

    def set_count(self, morph, new_count):
        """Set the number of observed occurences of a morph.
        Also updates the true category distribution.
        """
        # Subtract the old contribution from the marginal distribution,
        # update the count, then add the new contribution back.
        if self._marginalizer is not None and self.count(morph) > 0:
            self._marginalizer.add(-self.count(morph),
                                   self.condprobs(morph))
        self._contexts[morph] = self._contexts[morph]._replace(count=new_count)
        assert self.count(morph) >= 0, '{} subzero count'.format(morph)
        if self._marginalizer is not None and self.count(morph) > 0:
            self._marginalizer.add(self.count(morph),
                                   self.condprobs(morph))
        self._zlctc = None

    @classmethod
    def valid_transitions(cls):
        """Returns (and caches) all valid transitions as pairs
        (from_category, to_category). Any transitions not included
        in the list are forbidden, and must have count 0 and probability 0.
        """
        if cls._valid_transitions is None:
            cls._valid_transitions = []
            categories = get_categories(wb=True)
            for cat1 in categories:
                for cat2 in categories:
                    if (cat1, cat2) in cls.zero_transitions:
                        continue
                    cls._valid_transitions.append((cat1, cat2))
            cls._valid_transitions = tuple(cls._valid_transitions)
        return cls._valid_transitions
class MaximumLikelihoodMorphUsage(object):
    """This is a replacement for MorphUsageProperties,
    that uses ML-estimation to replace the property-based
    conditional category probabilities.
    """

    # Category-transition constraints are shared with the
    # property-based model.
    zero_transitions = MorphUsageProperties.zero_transitions
    forbid_zzz = MorphUsageProperties.forbid_zzz
    _valid_transitions = MorphUsageProperties._valid_transitions

    def __init__(self, corpus_coding, param_dict):
        self._corpus_coding = corpus_coding
        self._param_dict = param_dict
        # Observed morph counts, filled in by calculate_usage_features.
        self._seen = collections.defaultdict(int)

    def get_params(self):
        """Returns a dict of hyperparameters."""
        return self._param_dict

    def set_params(self, params):
        """Sets hyperparameters to loaded values."""
        self._param_dict = params

    def clear(self):
        """Resets the morph counts."""
        self._seen.clear()

    def calculate_usage_features(self, seg_func):
        """Recalculate morph counts"""
        self._seen.clear()
        for rcount, segments in seg_func():
            for morph in segments:
                self._seen[morph] += rcount

    def feature_cost(self, morph):
        """The cost of encoding the necessary features along with a morph.
        Always zero in the ML-estimation stage.
        Exists for drop-in compatibility with MorphUsageProperties"""
        return 0

    def estimate_contexts(self, old_morphs, new_morphs):
        """Exists for drop-in compatibility with MorphUsageProperties"""
        return []

    def remove_temporaries(self, temporaries):
        """Exists for drop-in compatibility with MorphUsageProperties"""
        pass

    def remove_zeros(self):
        """Exists for drop-in compatibility with MorphUsageProperties"""
        pass

    def condprobs(self, morph):
        """Calculate ML-estimated conditional probabilities
        P(Category|Morph) from the observed emission counts.

        Arguments:
            morph : A string representation of the morph type.
        """
        counts = self._corpus_coding.get_emission_counts(morph)
        return self._normalize(counts)

    @property
    def marginal_class_probs(self):
        """True distribution of class probabilities,
        calculated by marginalizing over the feature based conditional
        probabilities over all observed morphs.
        This will not give the same result as the observed count based
        calculation.
        """
        return self._normalize(self.category_token_count)

    @property
    def category_token_count(self):
        """Un-normalized distribution of class probabilities,
        the sum of which is the number of observed morphs.
        See marginal_class_probs for the normalized version.
        """
        # FIX: the per-category counts must be unpacked into the 4-field
        # namedtuple; previously the generator itself was passed as a
        # single positional argument, raising a TypeError.
        return ByCategory(
            *(self._corpus_coding._cat_tagcount[category]
              for category in get_categories()))

    @staticmethod
    def _normalize(counts):
        """Scale a ByCategory-like sequence of counts to sum to one."""
        total = sum(counts)
        assert total != 0
        return ByCategory(*(float(x) / total for x in counts))

    @staticmethod
    def context_type(prev_morph, next_morph, prev_cat, next_cat):
        """Cluster certain types of context, to allow making context-dependant
        joining decisions."""
        return MorphUsageProperties.context_type(prev_morph, next_morph,
                                                 prev_cat, next_cat)

    def seen_morphs(self):
        """All morphs that have defined emissions."""
        return [morph for (morph, count) in self._seen.items()
                if count > 0]

    def __contains__(self, morph):
        return morph in self._seen

    def get_context_features(self, morph):
        """Returns dummy context features."""
        return MorphContext(self.count(morph), 1., 1.)

    def count(self, morph):
        """The counts in the corpus of morphs with contexts."""
        if morph not in self._seen:
            return 0
        return self._seen[morph]

    def set_count(self, morph, new_count):
        """Set the number of observed occurences of a morph."""
        self._seen[morph] = new_count

    @classmethod
    def valid_transitions(cls):
        """Returns (and caches) all valid transitions as pairs
        (from_category, to_category). Any transitions not included
        in the list are forbidden, and must have count 0 and probability 0.
        """
        return cls._valid_transitions
class CategorizedMorph(object):
    """Represents a morph with attached category information.

    These objects should be treated as immutable, even though
    it is not enforced by the code.
    """

    __slots__ = ['morph', 'category']

    def __init__(self, morph, category=None):
        self.morph = morph
        self.category = category

    def __repr__(self):
        if self.category is None:
            return _str(self.morph)
        return self.morph + '/' + self.category

    def __eq__(self, other):
        return (isinstance(other, CategorizedMorph)
                and self.morph == other.morph
                and self.category == other.category)

    def __hash__(self):
        return hash((self.morph, self.category))

    def __len__(self):
        return len(self.morph)

    def __getitem__(self, i):
        return self.morph[i]
def get_categories(wb=False):
    """The category tags supported by this model.

    Arguments:
        wb : If True, the word boundary will be included. Default: False.
    """
    tags = list(ByCategory._fields)
    if wb:
        tags.append(WORD_BOUNDARY)
    return tags
def sigmoid(value, threshold, slope):
    """Logistic function centered at threshold with the given slope.

    FIX: math.exp raises OverflowError when the exponent exceeds roughly
    709 (e.g. for a large perplexity far below the threshold); in that
    regime the sigmoid is indistinguishable from 0, so return 0.0
    instead of crashing.
    """
    try:
        return 1.0 / (1.0 + math.exp(-slope * (value - threshold)))
    except OverflowError:
        return 0.0
# Constant c (= 2.865) of Rissanen's universal-prior approximation, in nats.
_LOG_C = math.log(2.865)


def universalprior(positive_number):
    """Compute the number of nats that are necessary for coding
    a positive integer according to Rissanen's universal prior.
    """
    # NOTE(review): callers (e.g. feature_cost) also pass non-integer
    # perplexities; the formula log(c) + log(n) is applied to any
    # positive number as-is.
    return _LOG_C + math.log(positive_number)
class Marginalizer(object):
    """An accumulator for marginalizing the class probabilities
    P(Category) from all the individual conditional probabilities
    P(Category|Morph) and observed morph probabilities P(Morph).

    First the unnormalized distribution is obtained by summing over
    #(Morph) * P(Category|Morph) over each morph, separately for each
    category. P(Category) is then obtained by normalizing the
    distribution.
    """

    def __init__(self):
        self._counts = [0.0] * len(ByCategory._fields)

    def add(self, rcount, condprobs):
        """Add the products #(Morph) * P(Category|Morph)
        for one observed morph."""
        weight = float(rcount)
        for index, prob in enumerate(condprobs):
            self._counts[index] += weight * float(prob)

    def normalized(self):
        """Returns the marginal probabilities for all categories."""
        total = self.total_token_count
        return ByCategory(*(count / total for count in self._counts))

    @property
    def total_token_count(self):
        """Total number of tokens seen."""
        return sum(self._counts)

    @property
    def category_token_count(self):
        """Tokens seen per category."""
        return ByCategory(*self._counts)
def map_category(analysis, from_cat, to_cat):
    """Replaces all occurrences of the category from_cat with
    to_cat, in the given analysis.
    """
    return tuple(
        CategorizedMorph(cmorph.morph, to_cat)
        if cmorph.category == from_cat else cmorph
        for cmorph in analysis)
| {
"repo_name": "aalto-speech/flatcat",
"path": "flatcat/categorizationscheme.py",
"copies": "1",
"size": "36206",
"license": "bsd-2-clause",
"hash": -1441414253786755800,
"line_mean": 36.5970924195,
"line_max": 86,
"alpha_frac": 0.5864773794,
"autogenerated": false,
"ratio": 4.256024450452569,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5342501829852568,
"avg_score": null,
"num_lines": null
} |
"""A Scheme interpreter and its read-eval-print loop."""
from scheme_primitives import *
from scheme_reader import *
from ucb import main, trace
##############
# Eval/Apply #
##############
def scheme_eval(expr, env, _=None):  # Optional third argument is ignored
    """Evaluate Scheme expression EXPR in environment ENV.

    >>> expr = read_line("(+ 2 2)")
    >>> expr
    Pair('+', Pair(2, Pair(2, nil)))
    >>> scheme_eval(expr, create_global_frame())
    4
    """
    assert expr is not None
    # Atomic expressions: symbols are looked up, literals are themselves.
    if scheme_symbolp(expr):
        return env.lookup(expr)
    if self_evaluating(expr):
        return expr
    # Combinations must be well-formed Scheme lists.
    if not scheme_listp(expr):
        raise SchemeError("malformed list: {0}".format(str(expr)))
    first, rest = expr.first, expr.second
    # Special forms receive their operands unevaluated.
    if scheme_symbolp(first) and first in SPECIAL_FORMS:
        return SPECIAL_FORMS[first](rest, env)
    # Call expression: evaluate operator and operands, then apply.
    procedure = scheme_eval(first, env)
    args = rest.map(lambda operand: scheme_eval(operand, env))
    return scheme_apply(procedure, args, env)
def self_evaluating(expr):
    """Return whether EXPR evaluates to itself."""
    if scheme_atomp(expr) or scheme_stringp(expr):
        return True
    return expr is okay
def scheme_apply(procedure, args, env):
    """Apply Scheme PROCEDURE to argument values ARGS in environment ENV."""
    # Built-ins dispatch straight to their Python implementation.
    if isinstance(procedure, PrimitiveProcedure):
        return apply_primitive(procedure, args, env)
    # User-defined procedures evaluate their body in a fresh call frame.
    if isinstance(procedure, UserDefinedProcedure):
        call_frame = make_call_frame(procedure, args, env)
        return eval_all(procedure.body, call_frame)
    raise SchemeError("cannot call: {0}".format(str(procedure)))
def apply_primitive(procedure, args_scheme_list, env):
    """Apply PrimitiveProcedure PROCEDURE to ARGS_SCHEME_LIST in ENV.

    >>> env = create_global_frame()
    >>> plus = env.bindings["+"]
    >>> twos = Pair(2, Pair(2, nil))
    >>> apply_primitive(plus, twos, env)
    4
    """
    # Convert a Scheme list to a Python list
    args = []
    while args_scheme_list is not nil:
        args.append(args_scheme_list.first)
        args_scheme_list = args_scheme_list.second
    # Some primitives additionally need the current environment.
    if procedure.use_env:
        args.append(env)
    try:
        return procedure.fn(*args)
    except TypeError as err:
        # FIX: previously a bare `raise SchemeError` discarded the cause
        # (typically a wrong argument count), producing a message-less
        # error; surface the underlying reason instead.
        raise SchemeError(err)
def eval_all(expressions, env):
    """Evaluate a Scheme list of EXPRESSIONS & return the value of the last.

    An empty sequence evaluates to okay.
    """
    # FIX: removed a leftover scaffold string statement
    # ("*** REPLACE THIS LINE ***") and replaced the recursion with
    # iteration, so long bodies cannot exhaust the Python call stack.
    if expressions == nil:
        return okay
    while expressions.second != nil:
        scheme_eval(expressions.first, env)
        expressions = expressions.second
    return scheme_eval(expressions.first, env)
def make_call_frame(procedure, args, env):
    """Make a frame that binds the formal parameters of PROCEDURE to ARGS."""
    # Mu procedures are dynamically scoped: they extend the calling
    # environment. Lambda procedures extend their defining environment.
    if isinstance(procedure, MuProcedure):
        parent = env
    else:
        parent = procedure.env
    return parent.make_child_frame(procedure.formals, args)
################
# Environments #
################
class Frame:
    """An environment frame binds Scheme symbols to Scheme values."""

    def __init__(self, parent):
        """An empty frame with a PARENT frame (which may be None)."""
        self.bindings = {}
        self.parent = parent

    def __repr__(self):
        if self.parent is None:
            return "<Global Frame>"
        bound = sorted('{0}: {1}'.format(k, v)
                       for k, v in self.bindings.items())
        return "<{{{0}}} -> {1}>".format(', '.join(bound), repr(self.parent))

    def lookup(self, symbol):
        """Return the value bound to SYMBOL. Errors if SYMBOL is not found."""
        # Walk outwards through the chain of parent frames.
        frame = self
        while frame is not None:
            if symbol in frame.bindings:
                return frame.bindings[symbol]
            frame = frame.parent
        raise SchemeError("unknown identifier: {0}".format(symbol))

    def make_child_frame(self, formals, vals):
        """Return a new local frame whose parent is SELF, in which the symbols
        in a Scheme list of formal parameters FORMALS are bound to the Scheme
        values in the Scheme list VALS. Raise an error if too many or too few
        vals are given.

        >>> env = create_global_frame()
        >>> formals, expressions = read_line("(a b c)"), read_line("(1 2 3)")
        >>> env.make_child_frame(formals, expressions)
        <{a: 1, b: 2, c: 3} -> <Global Frame>>
        """
        if len(formals) != len(vals):
            raise SchemeError("Formal parameter and Arguments sizes don't match")
        child = Frame(self)  # Create a new child with self as the parent
        if formals != nil or vals != nil:
            for name, value in zip(formals, vals):
                child.bindings[name] = value
        return child

    def define(self, symbol, value):
        """Define Scheme SYMBOL to have VALUE."""
        self.bindings[symbol] = value
class UserDefinedProcedure:
    """A procedure defined by an expression."""


class LambdaProcedure(UserDefinedProcedure):
    """A procedure defined by a lambda expression or a define form."""

    def __init__(self, formals, body, env):
        """A procedure with formal parameter list FORMALS (a Scheme list),
        a Scheme list of BODY expressions, and a parent environment that
        starts with Frame ENV.
        """
        self.formals = formals
        self.body = body
        self.env = env

    def __str__(self):
        # Render as the equivalent lambda expression.
        return str(Pair("lambda", Pair(self.formals, self.body)))

    def __repr__(self):
        return "LambdaProcedure({!r}, {!r}, {!r})".format(
            self.formals, self.body, self.env)
#################
# Special forms #
#################
def do_define_form(expressions, env):
    """Evaluate a define form."""
    check_form(expressions, 2)
    target = expressions.first
    if scheme_symbolp(target):
        # (define <name> <expr>): bind the evaluated expression.
        check_form(expressions, 2, 2)
        env.define(target, scheme_eval(expressions.second.first, env))
        return target
    if isinstance(target, Pair) and scheme_symbolp(target.first):
        # (define (<name> . <formals>) <body>...): lambda shorthand.
        procedure = LambdaProcedure(target.second, expressions.second, env)
        env.define(target.first, procedure)
        return target.first
    bad = target.first if isinstance(target, Pair) else target
    raise SchemeError("Non-symbol: {}".format(bad))
def do_quote_form(expressions, env):
    """Evaluate a quote form."""
    check_form(expressions, 1, 1)
    # The single operand is returned unevaluated.
    return expressions.first
def do_begin_form(expressions, env):
    """Evaluate begin form.

    Evaluates each expression in order and returns the value of the
    last one (delegated to eval_all).
    """
    check_form(expressions, 1)
    return eval_all(expressions, env)
def do_lambda_form(expressions, env):
    """Evaluate a lambda form, producing a LambdaProcedure.

    The first operand is the formal parameter list (validated here);
    everything after it is the body.
    """
    check_form(expressions, 2)
    formals = expressions.first
    check_formals(formals)
    return LambdaProcedure(formals, expressions.second, env)
def do_if_form(expressions, env):
    """Evaluate an if form.

    Only the selected branch is evaluated; a missing else-branch
    yields okay.
    """
    check_form(expressions, 2, 3)
    test = scheme_eval(expressions.first, env)
    if scheme_true(test):
        return scheme_eval(expressions.second.first, env)
    alternative = expressions.second.second
    if alternative == nil:
        return okay
    return scheme_eval(alternative.first, env)
def do_and_form(expressions, env):
    """Evaluate a short-circuited and form.

    Returns True for an empty form, the value of the final expression
    when all are true, and False as soon as any value is false.
    """
    if expressions == nil:
        return True
    while True:
        value = scheme_eval(expressions.first, env)
        if not scheme_true(value):
            return False
        if expressions.second == nil:
            return value
        expressions = expressions.second
def do_or_form(expressions, env):
    """Evaluate a short-circuited or form.

    Returns the first true value encountered; False for an empty form
    or when every expression is false.
    """
    while expressions != nil:
        value = scheme_eval(expressions.first, env)
        if scheme_true(value):
            return value
        expressions = expressions.second
    return False
def do_cond_form(expressions, env):
    """Evaluate a cond form.

    Clauses are tried in order; an else clause must come last.  A
    matching clause with no body yields its test value; one with a body
    yields the value of its last body expression.  No match yields okay.
    """
    remaining = expressions
    while remaining is not nil:
        clause = remaining.first
        check_form(clause, 1)
        if clause.first == "else":
            # else only matches when it is the final clause.
            if remaining.second is not nil:
                raise SchemeError("else must be last")
            test = True
        else:
            test = scheme_eval(clause.first, env)
        if scheme_true(test):
            if clause.second == nil:
                return test
            return eval_all(clause.second, env)
        remaining = remaining.second
    return okay
def do_let_form(expressions, env):
    """Evaluate a let form.

    Builds a child frame from the binding list, then evaluates the body
    in that frame, returning the last body value.
    """
    check_form(expressions, 2)
    let_env = make_let_frame(expressions.first, env)
    return eval_all(expressions.second, let_env)
def make_let_frame(bindings, env):
    """Create a frame containing bindings from a let expression.

    Each binding is a two-element list (<symbol> <expr>); expressions
    are evaluated (in order) in ENV, then bound in a new child frame.
    """
    if not scheme_listp(bindings):
        raise SchemeError("bad bindings list in let form")
    names, values = [], []
    while bindings is not nil:
        clause = bindings.first
        check_form(clause, 2, 2)
        if not scheme_symbolp(clause.first):
            raise SchemeError("{0} is Not a symbol for let expression".format(clause.first))
        names.append(clause.first)
        values.append(scheme_eval(clause.second.first, env))
        bindings = bindings.second
    # Rebuild the two Scheme lists back-to-front.
    formals = args = nil
    for name, value in zip(reversed(names), reversed(values)):
        formals = Pair(name, formals)
        args = Pair(value, args)
    return env.make_child_frame(formals, args)
# Dispatch table from special-form keyword to handler.  The evaluator
# consults this before treating a combination as a procedure call.
SPECIAL_FORMS = {
    "and": do_and_form,
    "begin": do_begin_form,
    "cond": do_cond_form,
    "define": do_define_form,
    "if": do_if_form,
    "lambda": do_lambda_form,
    "let": do_let_form,
    "or": do_or_form,
    "quote": do_quote_form,
}
# Utility methods for checking the structure of Scheme programs
def check_form(expr, min, max=float('inf')):
    """Check EXPR is a proper list whose length is at least MIN and no more
    than MAX (default: no maximum). Raises a SchemeError if this is not the
    case.
    """
    if not scheme_listp(expr):
        raise SchemeError("badly formed expression: " + str(expr))
    n_operands = len(expr)
    if n_operands < min:
        raise SchemeError("too few operands in form")
    if n_operands > max:
        raise SchemeError("too many operands in form")
def check_formals(formals):
    """Check that FORMALS is a valid parameter list, a Scheme list of symbols
    in which each symbol is distinct. Raise a SchemeError if the list of
    formals is not a well-formed list of symbols or if any symbol is repeated.

    >>> check_formals(read_line("(a b c)"))
    """
    seen = set()
    node = formals
    while node != nil:
        rest = node.second
        # A proper list must be Pair-linked and nil-terminated.
        if not (isinstance(rest, Pair) or rest == nil):
            raise SchemeError("Not a well-formed list of parameters")
        if not scheme_symbolp(node.first):
            raise SchemeError("{0} is Not a valid symbol in parameters".format(node.first))
        if node.first in seen:
            raise SchemeError("{0} is already in previous parameters list".format(node.first))
        seen.add(node.first)
        node = rest
#################
# Dynamic Scope #
#################
class MuProcedure(UserDefinedProcedure):
    """A procedure defined by a mu expression, which has dynamic scope.

    Unlike LambdaProcedure no defining environment is stored: the body
    is evaluated relative to the environment of the *call* site.
    """

    def __init__(self, formals, body):
        """Store FORMALS (a Scheme list) and BODY (a Scheme list of
        expressions).
        """
        self.formals = formals
        self.body = body

    def __str__(self):
        # Render as the equivalent (mu <formals> <body>) source form.
        return str(Pair("mu", Pair(self.formals, self.body)))

    def __repr__(self):
        parts = (self.formals, self.body)
        return "MuProcedure({!r}, {!r})".format(*parts)
def do_mu_form(expressions, env):
    """Evaluate a mu form, producing a dynamically scoped MuProcedure."""
    check_form(expressions, 2)
    formals = expressions.first
    check_formals(formals)
    return MuProcedure(formals, expressions.second)
# Register mu alongside the other special forms.
SPECIAL_FORMS["mu"] = do_mu_form
##################
# Tail Recursion #
##################
class Evaluate:
    """An expression EXPR to be evaluated in environment ENV.

    Used as a thunk by scheme_optimized_eval: returning an Evaluate
    defers the work to the driving loop, enabling tail-call optimization.
    """
    def __init__(self, expr, env):
        self.expr = expr  # the pending Scheme expression
        self.env = env  # the Frame in which to evaluate it
def scheme_optimized_eval(expr, env, tail=False):
    """Evaluate Scheme expression EXPR in environment ENV.

    When TAIL is true, a non-atomic expression is not evaluated here;
    instead an Evaluate thunk is returned and the caller's driving loop
    (below) unwinds it iteratively, so tail calls use constant Python
    stack depth.

    Fix: the tail branch previously held a placeholder string and fell
    through to ``return result`` with ``result`` unbound (NameError).
    """
    # Evaluate Atoms -- these can never be deferred.
    assert expr is not None
    if scheme_symbolp(expr):
        return env.lookup(expr)
    elif self_evaluating(expr):
        return expr
    if tail:
        # Defer evaluation: the active driving loop will pick this up.
        return Evaluate(expr, env)
    else:
        result = Evaluate(expr, env)
    # Driving loop: repeatedly unwrap Evaluate thunks until a value
    # is produced.
    while isinstance(result, Evaluate):
        expr, env = result.expr, result.env
        # All non-atomic expressions are lists (combinations)
        if not scheme_listp(expr):
            raise SchemeError("malformed list: {0}".format(str(expr)))
        first, rest = expr.first, expr.second
        if (scheme_symbolp(first) and first in SPECIAL_FORMS):
            result = SPECIAL_FORMS[first](rest, env)
        else:
            procedure = scheme_eval(first, env)
            args = rest.map(lambda operand: scheme_eval(operand, env))
            result = scheme_apply(procedure, args, env)
    return result
################################################################
# Uncomment the following line to apply tail call optimization #
################################################################
# scheme_eval = scheme_optimized_eval
################
# Input/Output #
################
def read_eval_print_loop(next_line, env, interactive=False, quiet=False,
                         startup=False, load_files=()):
    """Read and evaluate input until an end of file or keyboard interrupt.

    NEXT_LINE is a zero-argument callable yielding a source buffer; ENV
    is the evaluation Frame.  When STARTUP is true, LOAD_FILES are
    loaded first and Ctrl-C returns to the prompt instead of exiting.
    When QUIET is true, results are not printed.
    """
    if startup:
        for filename in load_files:
            scheme_load(filename, True, env)
    while True:
        try:
            src = next_line()
            while src.more_on_line:
                expression = scheme_read(src)
                result = scheme_eval(expression, env)
                if not quiet and result is not None:
                    print(result)
        except (SchemeError, SyntaxError, ValueError, RuntimeError) as err:
            # Only recursion-depth RuntimeErrors are user errors; any
            # other RuntimeError is a genuine bug and is re-raised.
            if (isinstance(err, RuntimeError) and
                'maximum recursion depth exceeded' not in getattr(err, 'args')[0]):
                raise
            elif isinstance(err, RuntimeError):
                print("Error: maximum recursion depth exceeded")
            else:
                print("Error:", err)
        except KeyboardInterrupt:  # <Control>-C
            if not startup:
                raise
            print()
            print("KeyboardInterrupt")
            if not interactive:
                return
        except EOFError:  # <Control>-D, etc.
            print()
            return
def scheme_load(*args):
    """Load a Scheme source file. ARGS should be of the form (SYM, ENV) or (SYM,
    QUIET, ENV). The file named SYM is loaded in environment ENV, with verbosity
    determined by QUIET (default true)."""
    if not (2 <= len(args) <= 3):
        expressions = args[:-1]
        raise SchemeError('"load" given incorrect number of arguments: '
                          '{0}'.format(len(expressions)))
    sym = args[0]
    quiet = args[1] if len(args) > 2 else True
    env = args[-1]
    if (scheme_stringp(sym)):
        # Unquote a Scheme string literal into a Python string.
        # NOTE(review): this uses Python eval on interpreter input; it is
        # only reached for values scheme_stringp accepts, but worth auditing.
        sym = eval(sym)
    check_type(sym, scheme_symbolp, 0, "load")
    with scheme_open(sym) as infile:
        lines = infile.readlines()
    # In quiet mode, buffer_lines gets an explicit None prompt.
    args = (lines, None) if quiet else (lines,)
    def next_line():
        return buffer_lines(*args)
    read_eval_print_loop(next_line, env, quiet=quiet)
    return okay
def scheme_open(filename):
    """Open FILENAME, trying FILENAME.scm as a fallback.

    Returns an open Python file object.  Raises SchemeError when the
    file cannot be opened; an explicit .scm name gets no fallback.
    """
    candidates = [filename]
    if not filename.endswith('.scm'):
        candidates.append(filename + '.scm')
    error = None
    for name in candidates:
        try:
            return open(name)
        except IOError as exc:
            error = exc
    raise SchemeError(str(error))
def create_global_frame():
    """Initialize and return a single-frame environment with built-in names."""
    env = Frame(None)
    # NOTE(review): the True flag presumably marks primitives that receive
    # the environment as an extra argument -- confirm against
    # PrimitiveProcedure's constructor.
    env.define("eval", PrimitiveProcedure(scheme_eval, True))
    env.define("apply", PrimitiveProcedure(scheme_apply, True))
    env.define("load", PrimitiveProcedure(scheme_load, True))
    add_primitives(env)
    return env
@main
def run(*argv):
    """Command-line entry point: parse arguments and start the REPL.

    With no file, reads interactively; with a file, either loads it
    into an interactive session (-load/-i) or executes it and exits.
    """
    import argparse
    parser = argparse.ArgumentParser(description='CS 61A Scheme interpreter')
    parser.add_argument('-load', '-i', action='store_true',
                        help='run file interactively')
    parser.add_argument('file', nargs='?',
                        type=argparse.FileType('r'), default=None,
                        help='Scheme file to run')
    args = parser.parse_args()
    next_line = buffer_input
    interactive = True
    load_files = []
    if args.file is not None:
        if args.load:
            # Interactive load: remember the filename for the REPL.
            load_files.append(getattr(args.file, 'name'))
        else:
            # Batch mode: feed the file's lines instead of stdin.
            lines = args.file.readlines()
            def next_line():
                return buffer_lines(lines)
            interactive = False
    read_eval_print_loop(next_line, create_global_frame(), startup=True,
                         interactive=interactive, load_files=load_files)
    tscheme_exitonclick()
| {
"repo_name": "tavaresdong/courses",
"path": "ucb_cs61A/projects/scheme/scheme.py",
"copies": "3",
"size": "18830",
"license": "mit",
"hash": -855015925241671200,
"line_mean": 32.2685512367,
"line_max": 97,
"alpha_frac": 0.5996813595,
"autogenerated": false,
"ratio": 3.960042060988433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6059723420488432,
"avg_score": null,
"num_lines": null
} |
"""A Scientific Calculator
type h for help
Basic symbols:
-> x : multiplication
-> + : addition
-> - : subtraction
-> / : (float) division
-> mod : modulo
-> sin : sine
-> cos : cosine
-> tan : tangent
-> ^ : power
-> 2^ : power of 2
"""
# This could get really fun!
# TODO Make curses interface
# TODO Check if/how to work with a stream i/o, i.e. without newline
# TODO Add floats
import math
binary = ["x",
"+",
"-",
"/",
"mod",
"^"]
unary = ["sin",
"cos",
"tan",
"2^"]
symbols = binary + unary
prompt = "Calc> "
# TODO def readPrompt(situation)
def isNumber(n):
    """Return True when N parses as a (possibly float) number."""
    try:
        float(n)
    except ValueError:
        return False
    return True
def op(symbol, x):
    """Apply SYMBOL to accumulator X and return the new value.

    Binary symbols prompt for a second operand; unary symbols act on X
    alone; a bare number replaces X; an empty line leaves X unchanged.
    Returns None after a bad second operand (the caller stops on None).
    """
    if symbol in binary:
        # TODO refactor all of this with recursion over op()
        # probably should use trees as well (boy that'd be fun)
        g = raw_input(prompt)
        if isNumber(g):
            y = eval(g) # to deal with floats
        else:
            print "Error!"
            return
        if symbol == "x":
            return x * y
        elif symbol == "+":
            return x + y
        elif symbol == "-":
            return x - y
        elif symbol == "/":
            return x / y
        elif symbol == "mod":
            return x % y
        elif symbol == "^":
            return pow(x, y)
    elif symbol in unary:
        if symbol == "sin":
            return math.sin(x)
        elif symbol == "cos":
            return math.cos(x)
        elif symbol == "tan":
            return math.tan(x)
        elif symbol == "2^":
            return pow(2, x)
    elif isNumber(symbol):
        # A plain number replaces the accumulator.
        return eval(symbol)
    elif symbol == "":
        return x
    else:
        print "Error"
        return x
def main():
    """Interactive REPL: read a starting number, then apply operators."""
    print __doc__
    while 1:
        # Keep prompting until a valid starting value is entered.
        while 1:
            g = raw_input(prompt)
            if g == "h" or g == "help":
                print __doc__
            elif isNumber(g):
                x = eval(g)
                print x
                break
            elif g == "":
                print ""
            else:
                print "Error, symbol not known. Value required."
        # Feed the accumulator through op() until it returns None
        # (bad second operand), then restart from a fresh value.
        while 1:
            if x is not None:
                x = op(raw_input(prompt), x)
                print x
            else:
                break
if __name__ == "__main__":
    main()
"repo_name": "edran/ProjectsForTeaching",
"path": "Numbers/calculator.py",
"copies": "1",
"size": "2565",
"license": "mit",
"hash": 7801913651204185000,
"line_mean": 20.2066115702,
"line_max": 68,
"alpha_frac": 0.428460039,
"autogenerated": false,
"ratio": 4.150485436893204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5078945475893204,
"avg_score": null,
"num_lines": null
} |
# ascii animation of zooming a mandelbrot fractal, z=z^2+c
from __future__ import print_function, division
import os
import time
import platform
from server import Mandelbrot
# Character resolution of the ASCII "screen" (columns x rows).
res_x = 100
res_y = 40
def screen(start, width):
    """Render one ASCII frame of the Mandelbrot set as a string.

    START is the complex top-left corner; WIDTH is the real-axis span.
    """
    dr = width / res_x
    di = dr * (res_x / res_y)
    di *= 0.8  # aspect ratio correction for character cells
    computed = Mandelbrot().calc_lines(start, res_x, dr, di, 0, res_y)
    return "\n".join(entry[1] for entry in computed)
def cls():
    """Clear the terminal: 'cls' on Windows, ANSI escapes elsewhere."""
    on_windows = platform.platform().startswith("Windows")
    if on_windows:
        os.system("cls")
    else:
        # ESC[2J clears the screen; ESC[1;1H homes the cursor.
        print(chr(27) + "[2J" + chr(27) + "[1;1H", end="")
def zoom():
    """Run a ~30 second zoom animation, printing one ASCII frame per loop.

    Fix: fps is initialized before the loop so the final report cannot
    raise NameError when the loop body never executes (e.g. a clock
    jump past duration), matching the fps-initialized variant of this
    example elsewhere in the codebase.
    """
    start = -2.0-1.0j       # top-left corner of the initial view
    width = 3.0             # real-axis span of the initial view
    duration = 30.0         # animation length in seconds
    wallclock_start = time.time()
    frames = 0
    fps = 0  # defined up front; reported after the loop
    cls()
    print("This is a mandelbrot zoom animation running without Pyro, in a single Python process.")
    time.sleep(2)
    while True:
        time_passed = time.time() - wallclock_start
        if time_passed >= duration:
            break
        # Shrink the view and drift the corner as time advances.
        actual_width = width * (1-time_passed/duration/1.1)
        actual_start = start + (0.06-0.002j)*time_passed
        frame = screen(actual_start, actual_width)
        cls()
        fps = frames/time_passed if time_passed > 0 else 0
        print("%.1f FPS time=%.2f width=%.2f" % (fps, time_passed, actual_width))
        print(frame)
        frames += 1
    print("Final FPS: %.2f" % fps)
if __name__ == "__main__":
    zoom()
| {
"repo_name": "irmen/Pyro4",
"path": "examples/distributed-mandelbrot/normal.py",
"copies": "1",
"size": "1434",
"license": "mit",
"hash": -7215749176698993000,
"line_mean": 26.0566037736,
"line_max": 98,
"alpha_frac": 0.5927475593,
"autogenerated": false,
"ratio": 3.1447368421052633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4237484401405264,
"avg_score": null,
"num_lines": null
} |
# ascii animation of zooming a mandelbrot fractal, z=z^2+c
from __future__ import print_function, division
import os
import time
import threading
import platform
import Pyro4
class MandelZoomer(object):
    """Renders zoom frames by fanning line computations out to all
    mandelbrot calculation servers registered in the Pyro name server."""
    # Character resolution of one ASCII frame.
    res_x = 100
    res_y = 40
    def __init__(self):
        # Guards num_lines_ready, which async callbacks update.
        self.num_lines_lock = threading.Lock()
        self.num_lines_ready = 0
        self.all_lines_ready = threading.Event()
        self.result = []
        with Pyro4.locateNS() as ns:
            mandels = ns.list(metadata_any={"class:mandelbrot_calc"})
            mandels = list(mandels.items())
        print("{0} mandelbrot calculation servers found.".format(len(mandels)))
        if not mandels:
            raise ValueError("launch at least one mandelbrot calculation server before starting this")
        time.sleep(2)
        self.mandels = [Pyro4.Proxy(uri) for _, uri in mandels]
    def batch_result(self, results):
        """Async callback: store finished lines and signal frame completion."""
        num_result_lines = 0
        for linenr, line in results:
            self.result[linenr] = line
            num_result_lines += 1
        with self.num_lines_lock:
            self.num_lines_ready += num_result_lines
            if self.num_lines_ready >= self.res_y:
                self.all_lines_ready.set()
    def screen(self, start, width):
        """Compute one frame; returns it as a newline-joined string."""
        dr = width / self.res_x
        di = dr*(self.res_x/self.res_y)
        di *= 0.8  # aspect ratio correction
        self.num_lines_ready = 0
        self.all_lines_ready.clear()
        self.result = ["?"] * self.res_y
        # One batch proxy per server; lines are dealt out round-robin.
        servers = [Pyro4.batch(proxy) for proxy in self.mandels]
        for i in range(self.res_y):
            server = servers[i % len(servers)]
            server.calc_line(start, self.res_x, i*di, dr, i)
        for batch in servers:
            batch(asynchronous=True).then(self.batch_result)
        # Lines that miss the 5s deadline stay rendered as "?".
        self.all_lines_ready.wait(timeout=5)
        return "\n".join(self.result)
    def cls(self):
        """Clear the terminal screen."""
        if platform.platform().startswith("Windows"):
            os.system("cls")
        else:
            print(chr(27)+"[2J"+chr(27)+"[1;1H", end="")  # ansi clear screen
if __name__ == "__main__":
start = -2.0-1.0j
width = 3.0
duration = 30.0
wallclock_start = time.time()
frames = 0
zoomer = MandelZoomer()
zoomer.cls()
print("This is a mandelbrot zoom animation running using Pyro, it will use all calculation server processes that are available.")
while True:
time_passed = time.time() - wallclock_start
if time_passed >= duration:
break
actual_width = width * (1-time_passed/duration/1.1)
actual_start = start + (0.06-0.002j)*time_passed
frame = zoomer.screen(actual_start, actual_width)
zoomer.cls()
fps = frames/time_passed if time_passed > 0 else 0
print("%.1f FPS time=%.2f width=%.2f" % (fps, time_passed, actual_width))
print(frame)
frames += 1
print("Final FPS: %.2f" % fps)
| {
"repo_name": "irmen/Pyro4",
"path": "examples/distributed-mandelbrot/client_asciizoom.py",
"copies": "1",
"size": "2913",
"license": "mit",
"hash": 891772748913523000,
"line_mean": 34.0963855422,
"line_max": 133,
"alpha_frac": 0.5894267079,
"autogenerated": false,
"ratio": 3.3872093023255814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4476636010225581,
"avg_score": null,
"num_lines": null
} |
# ascii animation of zooming a mandelbrot fractal, z=z^2+c
import os
import time
import platform
from concurrent import futures
from Pyro5.api import locate_ns, Proxy, BatchProxy
class MandelZoomer(object):
    """Renders zoom frames by spreading line computations over all
    mandelbrot calculation servers known to the Pyro name server."""
    # Character resolution of one ASCII frame.
    res_x = 100
    res_y = 40
    def __init__(self):
        self.result = []
        with locate_ns() as ns:
            mandels = ns.yplookup(meta_any={"class:mandelbrot_calc"})
            self.mandels = [uri for _, (uri, meta) in mandels.items()]
        print("{0} mandelbrot calculation servers found.".format(len(self.mandels)))
        if not mandels:
            raise ValueError("launch at least one mandelbrot calculation server before starting this")
        time.sleep(2)
    def screen(self, start, width):
        """Compute one frame; returns it as a newline-joined string."""
        dr = width / self.res_x
        di = dr*(self.res_x/self.res_y)
        di *= 0.8  # aspect ratio correction
        self.result = ["?"] * self.res_y
        # One batch proxy per server; lines are dealt out round-robin
        # and the batches executed concurrently in a thread pool.
        servers = [BatchProxy(Proxy(uri)) for uri in self.mandels]
        with futures.ThreadPoolExecutor(max_workers=len(servers)*2) as pool:
            for i in range(self.res_y):
                server = servers[i % len(servers)]
                server.calc_line(start, self.res_x, i*di, dr, i)
            tasks = [pool.submit(server) for server in servers]
            for task in futures.as_completed(tasks):
                lines = task.result()
                for (linenr, line) in lines:
                    self.result[linenr] = line
        return "\n".join(self.result)
    def cls(self):
        """Clear the terminal screen."""
        if platform.platform().startswith("Windows"):
            os.system("cls")
        else:
            print(chr(27)+"[2J"+chr(27)+"[1;1H", end="")  # ansi clear screen
if __name__ == "__main__":
start = -2.0-1.0j
width = 3.0
duration = 30.0
wallclock_start = time.time()
frames = 0
zoomer = MandelZoomer()
zoomer.cls()
print("This is a mandelbrot zoom animation running using Pyro, it will use all calculation server processes that are available.")
while True:
time_passed = time.time() - wallclock_start
if time_passed >= duration:
break
actual_width = width * (1-time_passed/duration/1.1)
actual_start = start + (0.06-0.002j)*time_passed
frame = zoomer.screen(actual_start, actual_width)
zoomer.cls()
fps = frames/time_passed if time_passed > 0 else 0
print("%.1f FPS time=%.2f width=%.2f" % (fps, time_passed, actual_width))
print(frame)
frames += 1
print("Final FPS: %.2f" % fps)
| {
"repo_name": "irmen/Pyro5",
"path": "examples/distributed-mandelbrot/client_asciizoom.py",
"copies": "1",
"size": "2525",
"license": "mit",
"hash": -7699707067124481000,
"line_mean": 35.5942028986,
"line_max": 133,
"alpha_frac": 0.5865346535,
"autogenerated": false,
"ratio": 3.4400544959128063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9451144855770428,
"avg_score": 0.015088858728475572,
"num_lines": 69
} |
# ascii animation of zooming a mandelbrot fractal, z=z^2+c
import os
import time
import platform
from server import Mandelbrot
# Character resolution of the ASCII "screen" (columns x rows).
res_x = 100
res_y = 40
def screen(start, width):
    """Render one ASCII frame of the Mandelbrot set as a string.

    START is the complex top-left corner; WIDTH is the real-axis span.
    """
    step_re = width / res_x
    step_im = step_re * (res_x / res_y)
    step_im *= 0.8  # aspect ratio correction for character cells
    calculated = Mandelbrot().calc_lines(start, res_x, step_re, step_im, 0, res_y)
    return "\n".join(item[1] for item in calculated)
def cls():
    """Clear the terminal: 'cls' on Windows, ANSI escapes elsewhere."""
    if not platform.platform().startswith("Windows"):
        # ESC[2J clears the screen; ESC[1;1H homes the cursor.
        print(chr(27) + "[2J" + chr(27) + "[1;1H", end="")
    else:
        os.system("cls")
def zoom():
    """Run a ~30 second zoom animation, printing one ASCII frame per loop."""
    start = -2.0-1.0j  # top-left corner of the initial view
    width = 3.0  # real-axis span of the initial view
    duration = 30.0  # animation length in seconds
    wallclock_start = time.time()
    frames = 0
    fps = 0  # pre-set so the final report works even with zero frames
    cls()
    print("This is a mandelbrot zoom animation running without Pyro, in a single Python process.")
    time.sleep(2)
    while True:
        time_passed = time.time() - wallclock_start
        if time_passed >= duration:
            break
        # Shrink the view and drift the corner as time advances.
        actual_width = width * (1-time_passed/duration/1.1)
        actual_start = start + (0.06-0.002j)*time_passed
        frame = screen(actual_start, actual_width)
        cls()
        fps = frames/time_passed if time_passed > 0 else 0
        print("%.1f FPS time=%.2f width=%.2f" % (fps, time_passed, actual_width))
        print(frame)
        frames += 1
    print("Final FPS: %.2f" % fps)
if __name__ == "__main__":
    zoom()
| {
"repo_name": "irmen/Pyro5",
"path": "examples/distributed-mandelbrot/normal.py",
"copies": "1",
"size": "1395",
"license": "mit",
"hash": -4141089522050098000,
"line_mean": 24.8333333333,
"line_max": 98,
"alpha_frac": 0.5856630824,
"autogenerated": false,
"ratio": 3.1,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9183598625728497,
"avg_score": 0.00041289133430054837,
"num_lines": 54
} |
# ASCII Art Generator (Image to ASCII Art Converter)
# FB - 20160925
import sys
if len(sys.argv) != 3:
    print "USAGE:"
    print "[python] img2asciiart.py InputImageFileName OutputTextFileName"
    print "Use quotes if file paths/names contain spaces!"
    sys.exit()
inputImageFileName = sys.argv[1]
OutputTextFileName = sys.argv[2]
from PIL import Image, ImageDraw, ImageFont
font = ImageFont.load_default() # load default bitmap monospaced font
(chrx, chry) = font.getsize(chr(32))
# calculate weights of ASCII chars
# (weight = fraction of lit pixels when the glyph is rendered)
weights = []
for i in range(32, 127):
    chrImage = font.getmask(chr(i))
    ctr = 0
    for y in range(chry):
        for x in range(chrx):
            if chrImage.getpixel((x, y)) > 0:
                ctr += 1
    weights.append(float(ctr) / (chrx * chry))
image = Image.open(inputImageFileName)
(imgx, imgy) = image.size
# Downscale so one character cell covers one image pixel.
imgx = int(imgx / chrx)
imgy = int(imgy / chry)
# NEAREST/BILINEAR/BICUBIC/ANTIALIAS
image = image.resize((imgx, imgy), Image.BICUBIC)
image = image.convert("L") # convert to grayscale
pixels = image.load()
output = open(OutputTextFileName, "w")
for y in range(imgy):
    for x in range(imgx):
        w = float(pixels[x, y]) / 255
        # find closest weight match
        wf = -1.0; k = -1
        for i in range(len(weights)):
            if abs(weights[i] - w) <= abs(wf - w):
                wf = weights[i]; k = i
        output.write(chr(k + 32))
    output.write("\n")
output.close()
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/580702_Image_to_ASCII_Art_Converter/recipe-580702.py",
"copies": "1",
"size": "1444",
"license": "mit",
"hash": 7518116511972968000,
"line_mean": 31.0888888889,
"line_max": 74,
"alpha_frac": 0.6350415512,
"autogenerated": false,
"ratio": 3.0658174097664546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4200858960966455,
"avg_score": null,
"num_lines": null
} |
""" ascii based histogram generator for quick inspection of
distributions.
"""
## give it a function API so it is easy and quick to use.
class Histogram(object):
CHAR = '*'
BIN_COUNT = 70
TICK_PRECISION = 2
MAX_HEIGHT = 100
def __init__(self,
data,
char = None,
bin_count = None,
tick_precision = None,
max_height = None,
):
data.sort()
self.data = data
self.char = char or self.CHAR
self.bin_count = bin_count or self.BIN_COUNT
self.tick_precision = tick_precision or self.TICK_PRECISION
self.max_height = max_height or self.MAX_HEIGHT
self.data_max = max(data)
self.data_min = min(data)
self.data_count = len(data)
self.bin_bounds = self._create_bin_bounds()
self.ticks = self._create_ticks(self.bin_bounds, self.data_min)
self.histogram = self._histogram()
def _create_bin_bounds(self, count = None):
count = count or self.bin_count
width = (self.data_max - self.data_min) /float(count)
bin_bounds = [self.data_min + width * (i + 1) for i in xrange(count)]
return bin_bounds
def _create_ticks(self, bin_bounds, data_min):
out = []
last = data_min
for v in bin_bounds:
out.append((v + last)/2.0)
last = v
return out
def _histogram(self):
""" no optimizations for now.
"""
bin_count = self.bin_count
hist = [0] * bin_count
data = self.data
bin_bounds = self.bin_bounds
for value in data:
positions = [i for (i, bound) in enumerate(bin_bounds) if value < bound]
if positions:
hist[min(positions)] += 1
## normalize
ratio = self.max_height / float(max(hist))
norm_hist = [i * ratio for i in hist]
return norm_hist
def pprint(self):
tick_precision = self.tick_precision
tick_fill = max([len('%f' % round(v, tick_precision)) for v in self.ticks])
lines = []
for (_bin, _tick) in zip(self.histogram, self.ticks):
line = '{0:{tick_fill},.{tick_precision}f}: {1:*>{bar_length}}'.format(
_tick,
'',
tick_fill = tick_fill,
tick_precision = tick_precision,
bar_length = round(_bin, 0),
)
lines.append(line)
print '\n'.join(lines)
def histogram(data, bin_count = None):
    """Convenience wrapper: build a Histogram over DATA and print it."""
    hist = Histogram(data = data, bin_count = bin_count)
    hist.pprint()
def test():
    """Demo: print histograms of several random distributions."""
    import random
    data = [random.gauss(3.3, 2) for i in range(10000)]
    histogram(data)
    print '\n\n'
    data = [random.lognormvariate(0, .7) for i in range(10000)]
    histogram(data)
    print '\n\n'
    data = [random.paretovariate(100) for i in range(10000)]
    histogram(data)
    print '\n\n'
    data = [int(random.gauss(0, 2)) for i in range(10000)]
    histogram(data)
    print '\n\n'
if __name__ == '__main__':
    test()
| {
"repo_name": "pavlos-christoforou/quietcasting",
"path": "lib/histogram.py",
"copies": "1",
"size": "3223",
"license": "mit",
"hash": -7713816690293762000,
"line_mean": 21.3819444444,
"line_max": 84,
"alpha_frac": 0.5218740304,
"autogenerated": false,
"ratio": 3.760793465577596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4782667495977596,
"avg_score": null,
"num_lines": null
} |
"""ASCII, Dammit
Stupid library to turn MS chars (like smart quotes) and ISO-Latin
chars into ASCII, dammit. Will do plain text approximations, or more
accurate HTML representations. Can also be jiggered to just fix the
smart quotes and leave the rest of ISO-Latin alone.
Sources:
http://www.cs.tut.fi/~jkorpela/latin1/all.html
http://www.webreference.com/html/reference/character/isolat1.html
1.0 Initial Release (2004-11-28)
The author hereby irrevocably places this work in the public domain.
To the extent that this statement does not divest the copyright,
the copyright holder hereby grants irrevocably to every recipient
all rights in this work otherwise reserved under copyright.
"""
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2009/04/28 10:45:03 $"
__license__ = "Public domain"
import re
import string
import types
# Map of Windows-1252 / ISO-Latin-1 bytes to replacements.  A tuple
# entry is (plain-ASCII approximation, HTML entity name); a bare string
# is used for both.  Fixes: entity names must not embed ';' and are
# case-sensitive -- 'lt;' -> 'lt', 'plusmm' -> 'plusmn', 'Aelig' -> 'AElig'.
CHARS = { '\x80' : ('EUR', 'euro'),
          '\x81' : ' ',
          '\x82' : (',', 'sbquo'),
          '\x83' : ('f', 'fnof'),
          '\x84' : (',,', 'bdquo'),
          '\x85' : ('...', 'hellip'),
          '\x86' : ('+', 'dagger'),
          '\x87' : ('++', 'Dagger'),
          '\x88' : ('^', 'caret'),  # NOTE(review): HTML names this entity 'circ'
          '\x89' : '%',
          '\x8A' : ('S', 'Scaron'),
          '\x8B' : ('<', 'lt'),  # was 'lt;', which rendered as '&lt;;'
          '\x8C' : ('OE', 'OElig'),
          '\x8D' : '?',
          '\x8E' : 'Z',
          '\x8F' : '?',
          '\x90' : '?',
          '\x91' : ("'", 'lsquo'),
          '\x92' : ("'", 'rsquo'),
          '\x93' : ('"', 'ldquo'),
          '\x94' : ('"', 'rdquo'),
          '\x95' : ('*', 'bull'),
          '\x96' : ('-', 'ndash'),
          '\x97' : ('--', 'mdash'),
          '\x98' : ('~', 'tilde'),
          '\x99' : ('(TM)', 'trade'),
          '\x9a' : ('s', 'scaron'),
          '\x9b' : ('>', 'gt'),
          '\x9c' : ('oe', 'oelig'),
          '\x9d' : '?',
          '\x9e' : 'z',
          '\x9f' : ('Y', 'Yuml'),
          '\xa0' : (' ', 'nbsp'),
          '\xa1' : ('!', 'iexcl'),
          '\xa2' : ('c', 'cent'),
          '\xa3' : ('GBP', 'pound'),
          '\xa4' : ('$', 'curren'), #This approximation is especially lame.
          '\xa5' : ('YEN', 'yen'),
          '\xa6' : ('|', 'brvbar'),
          '\xa7' : ('S', 'sect'),
          '\xa8' : ('..', 'uml'),
          '\xa9' : ('', 'copy'),
          '\xaa' : ('(th)', 'ordf'),
          '\xab' : ('<<', 'laquo'),
          '\xac' : ('!', 'not'),
          '\xad' : (' ', 'shy'),
          '\xae' : ('(R)', 'reg'),
          '\xaf' : ('-', 'macr'),
          '\xb0' : ('o', 'deg'),
          '\xb1' : ('+-', 'plusmn'),  # was 'plusmm', not a valid entity
          '\xb2' : ('2', 'sup2'),
          '\xb3' : ('3', 'sup3'),
          '\xb4' : ("'", 'acute'),
          '\xb5' : ('u', 'micro'),
          '\xb6' : ('P', 'para'),
          '\xb7' : ('*', 'middot'),
          '\xb8' : (',', 'cedil'),
          '\xb9' : ('1', 'sup1'),
          '\xba' : ('(th)', 'ordm'),
          '\xbb' : ('>>', 'raquo'),
          '\xbc' : ('1/4', 'frac14'),
          '\xbd' : ('1/2', 'frac12'),
          '\xbe' : ('3/4', 'frac34'),
          '\xbf' : ('?', 'iquest'),
          '\xc0' : ('A', "Agrave"),
          '\xc1' : ('A', "Aacute"),
          '\xc2' : ('A', "Acirc"),
          '\xc3' : ('A', "Atilde"),
          '\xc4' : ('A', "Auml"),
          '\xc5' : ('A', "Aring"),
          '\xc6' : ('AE', "AElig"),  # was "Aelig"; entity names are case-sensitive
          '\xc7' : ('C', "Ccedil"),
          '\xc8' : ('E', "Egrave"),
          '\xc9' : ('E', "Eacute"),
          '\xca' : ('E', "Ecirc"),
          '\xcb' : ('E', "Euml"),
          '\xcc' : ('I', "Igrave"),
          '\xcd' : ('I', "Iacute"),
          '\xce' : ('I', "Icirc"),
          '\xcf' : ('I', "Iuml"),
          '\xd0' : ('D', "Eth"),
          '\xd1' : ('N', "Ntilde"),
          '\xd2' : ('O', "Ograve"),
          '\xd3' : ('O', "Oacute"),
          '\xd4' : ('O', "Ocirc"),
          '\xd5' : ('O', "Otilde"),
          '\xd6' : ('O', "Ouml"),
          '\xd7' : ('*', "times"),
          '\xd8' : ('O', "Oslash"),
          '\xd9' : ('U', "Ugrave"),
          '\xda' : ('U', "Uacute"),
          '\xdb' : ('U', "Ucirc"),
          '\xdc' : ('U', "Uuml"),
          '\xdd' : ('Y', "Yacute"),
          '\xde' : ('b', "Thorn"),
          '\xdf' : ('B', "szlig"),
          '\xe0' : ('a', "agrave"),
          '\xe1' : ('a', "aacute"),
          '\xe2' : ('a', "acirc"),
          '\xe3' : ('a', "atilde"),
          '\xe4' : ('a', "auml"),
          '\xe5' : ('a', "aring"),
          '\xe6' : ('ae', "aelig"),
          '\xe7' : ('c', "ccedil"),
          '\xe8' : ('e', "egrave"),
          '\xe9' : ('e', "eacute"),
          '\xea' : ('e', "ecirc"),
          '\xeb' : ('e', "euml"),
          '\xec' : ('i', "igrave"),
          '\xed' : ('i', "iacute"),
          '\xee' : ('i', "icirc"),
          '\xef' : ('i', "iuml"),
          '\xf0' : ('o', "eth"),
          '\xf1' : ('n', "ntilde"),
          '\xf2' : ('o', "ograve"),
          '\xf3' : ('o', "oacute"),
          '\xf4' : ('o', "ocirc"),
          '\xf5' : ('o', "otilde"),
          '\xf6' : ('o', "ouml"),
          '\xf7' : ('/', "divide"),
          '\xf8' : ('o', "oslash"),
          '\xf9' : ('u', "ugrave"),
          '\xfa' : ('u', "uacute"),
          '\xfb' : ('u', "ucirc"),
          '\xfc' : ('u', "uuml"),
          '\xfd' : ('y', "yacute"),
          '\xfe' : ('b', "thorn"),
          '\xff' : ('y', "yuml"),
          }
def _makeRE(limit):
"""Returns a regular expression object that will match special characters
up to the given limit."""
return re.compile("([\x80-\\x%s])" % limit, re.M)
# Matches every ISO-Latin-1 special character (0x80-0xFF).
ALL = _makeRE('ff')
# Matches only the Windows-1252 "smart" range (0x80-0x9F).
ONLY_WINDOWS = _makeRE('9f')
def _replHTML(match):
    "Replace the matched character with its HTML equivalent."
    # Thin wrapper so re.sub can take a one-argument callback.
    return _repl(match, 1)
def _repl(match, html=0):
    "Replace the matched character with its HTML or ASCII equivalent."
    g = match.group(0)
    # Unknown characters fall through unchanged.
    a = CHARS.get(g,g)
    # A tuple entry is (ascii, entity-name); pick by the html flag.
    # NOTE(review): types.TupleType exists in Python 2 only.
    if type(a) == types.TupleType:
        a = a[html]
    if html:
        a = '&' + a + ';'
    return a
def _dammit(t, html=0, fixWindowsOnly=0):
    "Turns ISO-Latin-1 into an ASCII representation, dammit."
    # Select the character range and the replacement strategy up front.
    pattern = ONLY_WINDOWS if fixWindowsOnly else ALL
    replacer = _replHTML if html else _repl
    return re.sub(pattern, replacer, t)
def asciiDammit(t, fixWindowsOnly=0):
    "Turns ISO-Latin-1 into a plain ASCII approximation, dammit."
    return _dammit(t, 0, fixWindowsOnly)
def htmlDammit(t, fixWindowsOnly=0):
    "Turns ISO-Latin-1 into plain ASCII with HTML codes, dammit."
    return _dammit(t, 1, fixWindowsOnly=fixWindowsOnly)
def demoronise(t):
    """Helper method named in honor of the original smart quotes
    remover, The Demoroniser:
    http://www.fourmilab.ch/webtools/demoroniser/

    Fixes only the Windows-1252 "smart" characters, leaving the rest
    of ISO-Latin-1 alone.
    """
    return asciiDammit(t, 1)
if __name__ == '__main__':
    # Quick demo: smart-quoted French, then ASCII/HTML conversions.
    french = '\x93Sacr\xe9 bleu!\x93'
    print "First we mangle some French."
    print asciiDammit(french)
    print htmlDammit(french)
    print
    print "And now we fix the MS-quotes but leave the French alone."
    print demoronise(french)
    print htmlDammit(french, 1)
| {
"repo_name": "pombredanne/SourceForge-Allura",
"path": "Allura/allura/lib/AsciiDammit.py",
"copies": "5",
"size": "7036",
"license": "apache-2.0",
"hash": 8455181613443969000,
"line_mean": 31.4239631336,
"line_max": 77,
"alpha_frac": 0.4090392268,
"autogenerated": false,
"ratio": 2.968776371308017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01984798696917275,
"num_lines": 217
} |
"""Ascii menu class"""
from __future__ import print_function
def ascii_menu(title=None, menu_list=None):
    """
    creates a simple ASCII menu from a list of tuples containing a label
    and a functions reference. The function should not use parameters.

    Runs until the user enters 'q'; a valid numeric selection executes
    the matching function and redisplays the menu.

    :param title: the title of the menu
    :param menu_list: an array of tuples [('label', f1), ...]
    """
    if not title:
        title = "Menu"
    n = len(menu_list)
    def display():
        # Print the title, underline, numbered labels, and the quit key.
        index = 1
        print()
        print(title)
        print(len(title) * "=")
        print()
        for label, function in menu_list:
            print(" {0} - {1}".format(index, label))
            index += 1
        print(" q - quit")
        print()
        print()
    display()
    running = True
    while running:
        result = raw_input("Select between {0} - {1}: ".format(1, n))
        print("<{0}>".format(result))
        if result.strip() in ["q"]:
            running = False
        else:
            try:
                result = int(result) - 1
                if 0 <= result < n:
                    (label, f) = menu_list[result]
                    print("EXECUTING:", label, f.__name__)
                    f()
                else:
                    print("ERROR: wrong selection")
            except Exception, e:
                # NOTE(review): Python 2 except syntax; raw_input above
                # is also Python 2 only.
                print("ERROR: ", e)
            display()
def menu_return_num(title=None, menu_list=None, tries=1):
    """
    creates a simple ASCII menu from a list of labels

    :param title: the title of the menu
    :param menu_list: a list of labels to choose
    :param tries: num of tries till discard
    :return: choice num (head: 0), quit: return 'q'
    """
    if not title:
        title = "Menu"
    n = len(menu_list)

    def display():
        """Print the title, the numbered entries, and the quit option."""
        index = 1
        print()
        print(title)
        print(len(title) * "=")
        print()
        for label in menu_list:
            print(" {0} - {1}".format(index, label))
            index += 1
        print(" q - quit")
        print()
        print()

    display()
    while tries > 0:
        result = raw_input("Select between {0} - {1}: ".format(1, n))
        if result == "q":
            return 'q'
        else:
            try:
                result = int(result)
            except ValueError:
                # Only a non-numeric entry counts as invalid input; the
                # original bare `except:` also swallowed KeyboardInterrupt
                # and SystemExit.
                print("invalid input...")
                tries -= 1
                continue
            # NOTE(review): a numeric but out-of-range selection does not
            # consume a try -- confirm this is intended.
            if 0 < result <= n:
                print("choice {0} selected.".format(result))
                return result - 1
            else:
                print("ERROR: wrong selection")
    return 'q'
| {
"repo_name": "rajpushkar83/base",
"path": "cloudmesh_base/menu.py",
"copies": "1",
"size": "2610",
"license": "apache-2.0",
"hash": 4095497446412312000,
"line_mean": 26.1875,
"line_max": 72,
"alpha_frac": 0.4739463602,
"autogenerated": false,
"ratio": 4.202898550724638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005208333333333333,
"num_lines": 96
} |
'''Ascii menu class'''
from __future__ import print_function
def ascii_menu(title=None, menu_list=None):
'''
creates a simple ASCII menu from a list of tuples containing a label
and a functions refernec. The function should not use parameters.
:param title: the title of the menu
:param menu_list: an array of tuples [('label', f1), ...]
'''
if not title:
title = "Menu"
n = len(menu_list)
def display():
index = 1
print()
print(title)
print(len(title) * "=")
print()
for (label, function) in menu_list:
print(" {0} - {1}".format(index, label))
index += 1
print(" q - quit")
print()
print()
display()
running = True
while running:
result = raw_input("Select between {0} - {1}: ".format(1, n))
print("<{0}>".format(result))
if result.strip() in ["q"]:
running = False
else:
try:
result = int(result) - 1
if result >= 0 and result < n:
(label, f) = menu_list[result]
print("EXECUTING:", label, f.__name__)
f()
else:
print("ERROR: wrong selection")
except Exception, e:
print("ERROR: ", e)
display()
def menu_return_num(title=None, menu_list=None, tries=1):
    '''
    creates a simple ASCII menu from a list of labels

    :param title: the title of the menu
    :param menu_list: a list of labels to choose
    :param tries: num of tries till discard
    :return: choice num (head: 0), quit: return 'q'
    '''
    if not title:
        title = "Menu"
    n = len(menu_list)

    def display():
        '''Print the title, the numbered entries, and the quit option.'''
        index = 1
        print()
        print(title)
        print(len(title) * "=")
        print()
        for label in menu_list:
            print(" {0} - {1}".format(index, label))
            index += 1
        print(" q - quit")
        print()
        print()

    display()
    while tries > 0:
        result = raw_input("Select between {0} - {1}: ".format(1, n))
        if result == "q":
            return 'q'
        else:
            try:
                result = int(result)
            except ValueError:
                # Only a non-numeric entry counts as invalid input; the
                # original bare `except:` also swallowed KeyboardInterrupt
                # and SystemExit.
                print("invalid input...")
                tries -= 1
                continue
            # NOTE(review): a numeric but out-of-range selection does not
            # consume a try -- confirm this is intended.
            if 0 < result <= n:
                print("choice {0} selected.".format(result))
                return result - 1
            else:
                print("ERROR: wrong selection")
    return 'q'
| {
"repo_name": "rajpushkar83/cloudmesh",
"path": "cloudmesh/util/menu.py",
"copies": "1",
"size": "2640",
"license": "apache-2.0",
"hash": 830469851992374300,
"line_mean": 26.5,
"line_max": 72,
"alpha_frac": 0.4768939394,
"autogenerated": false,
"ratio": 4.190476190476191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005208333333333333,
"num_lines": 96
} |
"""AsciiPic base exception handling."""
class AsciipicException(Exception):
    """Base Asciipic exception.

    To correctly use this class, inherit from it and define
    a `template` property.

    That `template` will be formatted using the keyword arguments
    provided to the constructor.
    """

    template = "An unknown exception occurred."

    def __init__(self, message=None, **kwargs):
        message = message or self.template
        try:
            message = message % kwargs
        except (TypeError, KeyError, ValueError):
            # Something went wrong during message formatting.
            # Probably kwargs doesn't match a variable in the message,
            # or the template itself is malformed (ValueError).
            message = ("Message: %(template)s. Extra or "
                       "missing info: %(kwargs)s" %
                       {"template": message, "kwargs": kwargs})
        super(AsciipicException, self).__init__(message)


class CliError(AsciipicException):
    """Something went wrong during the processing of command line."""

    template = "Something went wrong during the processing of command line."


class Invalid(AsciipicException):
    """The received object is not valid."""

    template = "Unacceptable parameters."


class NotFound(AsciipicException):
    """The required object is not available in container."""

    template = "The %(object)r was not found in %(container)s."


class NotSupported(AsciipicException):
    """The functionality required is not available in the current context."""

    template = "%(feature)s is not available in %(context)s."


class InvalidName(AsciipicException):
    """The name was not found in the context."""

    # Fixed: "%(list_name)" lacked the "s" conversion, which made the
    # %-formatting raise ValueError instead of producing a message.
    template = "%(name)s not found in the %(list_name)s list."


class TableNameAlreadyExists(AsciipicException):
    """The table name already exists."""

    template = "A table with the name %(name)s already exists."


class ItemNotFound(AsciipicException):
    """The item was not found."""

    template = "The item with this id : %(id)s was not found."


class TooManyItems(AsciipicException):
    """Too many items returned."""

    template = "Expected %(expected)s items but found %(found)s."


class QueryError(AsciipicException):
    """Something went wrong when executing the query."""

    template = "Error while interacting with DB : %(msg)s"


class InvalidCredentials(AsciipicException):
    """The credentials are invalid."""

    template = "Credentials provided for user %(username)s are invalid."


class UnableToGenerateToken(AsciipicException):
    """Something went wrong when generating the token."""

    template = ("Something went wrong when generating the token "
                "for user : %(username)s")


class FailedTooManyTimes(AsciipicException):
    """The operations failed too many times."""

    # Fixed: "%(method)" lacked the "s" conversion (ValueError on format).
    template = ("Method %(method)s from module %(module)s failed"
                " too many times.")
| {
"repo_name": "micumatei/asciipic",
"path": "asciipic/common/exception.py",
"copies": "1",
"size": "2888",
"license": "mit",
"hash": -236484783693629980,
"line_mean": 24.3333333333,
"line_max": 77,
"alpha_frac": 0.665166205,
"autogenerated": false,
"ratio": 4.369137670196672,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5534303875196671,
"avg_score": null,
"num_lines": null
} |
"""ASCII plots (experimental).
The plots are printed directly to standard output.
"""
import typing
if typing.TYPE_CHECKING:
from physt.histogram_nd import Histogram2D
try:
import asciiplotlib
ENABLE_ASCIIPLOTLIB = True
except ImportError:
asciiplotlib = None
ENABLE_ASCIIPLOTLIB = False
types: typing.Tuple[str, ...] = ("hbar",)
dims = {
"hbar": [1],
}
def hbar(h1, width=80, show_values=False):
    """Print a horizontal bar plot of a 1-D histogram to stdout.

    Uses asciiplotlib when it was importable; otherwise falls back to a
    plain '#'-bar rendering. Note that `width` and `show_values` only
    affect the fallback renderer.
    """
    if not ENABLE_ASCIIPLOTLIB:
        # Scale normalized frequencies to at most `width` characters.
        scaled = (h1.normalize().frequencies * width).round().astype(int)
        for idx in range(h1.bin_count):
            bar = "#" * scaled[idx]
            if show_values:
                print(bar, h1.frequencies[idx])
            else:
                print(bar)
        return
    fig = asciiplotlib.figure()
    fig.hist(h1.frequencies, h1.numpy_bins, orientation="horizontal")
    fig.show()
try:
    import xtermcolor

    SUPPORTED_CMAPS = ("Greys", "Greys_r")
    DEFAULT_CMAP = SUPPORTED_CMAPS[1]

    def map(h2: "Histogram2D", **kwargs):
        """Heat map.

        Note: Available only if xtermcolor present.

        :param value_format: a format-spec string (default ".2f") or a
            callable used to render the axis edge values.
        :param cmap: one of SUPPORTED_CMAPS.
        """
        # Value format: accept a format-spec string or a callable.
        val_format = kwargs.pop("value_format", ".2f")
        if isinstance(val_format, str):
            def value_format(val, _spec=val_format):
                return ("{0:" + _spec + "}").format(val)
        else:
            # BUG FIX: previously a callable left `value_format`
            # undefined and every later use raised NameError.
            value_format = val_format
        data = (h2.frequencies / h2.frequencies.max() * 255).astype(int)
        # Colour map
        cmap = kwargs.pop("cmap", DEFAULT_CMAP)
        if cmap == "Greys":
            data = 255 - data
            colorbar_range = range(h2.shape[1] + 1, -1, -1)
        elif cmap == "Greys_r":
            colorbar_range = range(h2.shape[1] + 2)
        else:
            raise ValueError(f"Unsupported colormap: {cmap}, select from: {SUPPORTED_CMAPS}")
        # 0x010101 * grey gives an identical R/G/B greyscale value.
        colors = (65536 + 256 + 1) * data
        print((value_format(h2.get_bin_right_edges(0)[-1]) + " →").rjust(h2.shape[1] + 2, " "))
        print("+" + "-" * h2.shape[1] + "+")
        for i in range(h2.shape[0] - 1, -1, -1):
            line_frags = [
                xtermcolor.colorize("█", bg=0, rgb=colors[i, j]) for j in range(h2.shape[1])
            ]
            line = "|" + "".join(line_frags) + "|"
            if i == h2.shape[0] - 1:
                line += value_format(h2.get_bin_right_edges(1)[-1]) + " ↑"
            if i == 0:
                line += value_format(h2.get_bin_left_edges(1)[0]) + " ↓"
            print(line)
        print("+" + "-" * h2.shape[1] + "+")
        print("←", value_format(h2.get_bin_left_edges(0)[0]))
        colorbar_frags = [
            xtermcolor.colorize("█", bg=0, rgb=(65536 + 256 + 1) * int(j * 255 / (h2.shape[1] + 2)))
            for j in colorbar_range
        ]
        colorbar = "".join(colorbar_frags)
        print()
        print("↓", 0)
        print(colorbar)
        print(str(h2.frequencies.max()).rjust(h2.shape[1], " "), "↑")

    types = types + ("map",)
    dims["map"] = [2]
except ImportError:
    pass
| {
"repo_name": "janpipek/physt",
"path": "physt/plotting/ascii.py",
"copies": "1",
"size": "2984",
"license": "mit",
"hash": 5587454046235464000,
"line_mean": 28.9797979798,
"line_max": 100,
"alpha_frac": 0.5219002695,
"autogenerated": false,
"ratio": 3.120925341745531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41428256112455314,
"avg_score": null,
"num_lines": null
} |
"""ASCII Printer"""
from ..utils.singleton import Singleton
from ..algebra.core.exceptions import BasisNotSetError
from .base import QnetBasePrinter
from .sympy import SympyStrPrinter
from ._precedence import precedence, PRECEDENCE
__all__ = []
__private__ = ['QnetAsciiPrinter', 'QnetAsciiDefaultPrinter']
class QnetAsciiPrinter(QnetBasePrinter):
    """Printer for a string (ASCII) representation.

    Attributes:
        _parenth_left (str): String to use for a left parenthesis
            (e.g. ``'\\left('`` in LaTeX). Used by :meth:`_split_op`
        _parenth_right (str): String to use for a right parenthesis
        _dagger_sym (str): Symbol that indicates the complex conjugate of an
            operator. Used by :meth:`_split_op`
        _tensor_sym (str): Symbol to use for tensor products. Used by
            :meth:`_render_hs_label`.
    """
    # printer class used to render embedded sympy expressions
    sympy_printer_cls = SympyStrPrinter
    printmethod = '_ascii'
    _default_settings = {
        'show_hs_label': True,  # alternatively: False, 'subscript'
        'sig_as_ketbra': True,
    }
    # delimiters and symbols consumed by the rendering methods below
    _parenth_left = '('
    _parenth_right = ')'
    _bracket_left = '['
    _bracket_right = ']'
    _dagger_sym = 'H'
    _tensor_sym = '*'
    _product_sym = '*'
    _circuit_series_sym = "<<"
    _circuit_concat_sym = "+"
    _cid = 'cid(%d)'  # placeholder for runs of circuit identities
    _sum_sym = 'Sum'
    _element_sym = 'in'
    _ellipsis = '...'
    _set_delim_left = '{'
    _set_delim_right = '}'
@property
def _spaced_product_sym(self):
if len(self._product_sym.strip()) == 0:
return self._product_sym
else:
return " %s " % self._product_sym
def _split_identifier(self, identifier):
"""Split the given identifier at the first underscore into (rendered)
name and subscript. Both `name` and `subscript` are rendered as
strings"""
try:
name, subscript = identifier.split("_", 1)
except (TypeError, ValueError, AttributeError):
name = identifier
subscript = ''
return self._render_str(name), self._render_str(subscript)
    def _split_op(
            self, identifier, hs_label=None, dagger=False, args=None):
        """Return `name`, total `subscript`, total `superscript` and
        `arguments` str. All of the returned strings are fully rendered.

        Args:
            identifier (str or SymbolicLabelBase): A (non-rendered/ascii)
                identifier that may include a subscript. The output `name` will
                be the `identifier` without any subscript
            hs_label (str): The rendered label for the Hilbert space of the
                operator, or None. Returned unchanged.
            dagger (bool): Flag to indicate whether the operator is daggered.
                If True, :attr:`dagger_sym` will be included in the
                `superscript` (or `subscript`, depending on the settings)
            args (list or None): List of arguments (expressions). Each element
                will be rendered with :meth:`doprint`. The total list of args
                will then be joined with commas, enclosed
                with :attr:`_parenth_left` and :attr:`_parenth_right`, and
                returned as the `arguments` string
        """
        # symbolic labels are first rendered via the default printer
        if self._isinstance(identifier, 'SymbolicLabelBase'):
            identifier = QnetAsciiDefaultPrinter()._print_SCALAR_TYPES(
                identifier.expr)
        name, total_subscript = self._split_identifier(identifier)
        total_superscript = ''
        if (hs_label not in [None, '']):
            # the Hilbert-space label goes into sub- or superscript
            # depending on the 'show_hs_label' setting
            if self._settings['show_hs_label'] == 'subscript':
                if len(total_subscript) == 0:
                    total_subscript = '(' + hs_label + ')'
                else:
                    total_subscript += ',(' + hs_label + ')'
            else:
                total_superscript += '(' + hs_label + ')'
        if dagger:
            total_superscript += self._dagger_sym
        args_str = ''
        if (args is not None) and (len(args) > 0):
            args_str = (self._parenth_left +
                        ",".join([self.doprint(arg) for arg in args]) +
                        self._parenth_right)
        return name, total_subscript, total_superscript, args_str
@classmethod
def _is_single_letter(cls, label):
return len(label) == 1
def _render_hs_label(self, hs):
"""Return the label of the given Hilbert space as a string"""
if isinstance(hs.__class__, Singleton):
return self._render_str(hs.label)
else:
return self._tensor_sym.join(
[self._render_str(ls.label) for ls in hs.local_factors])
def _render_state_label(self, label):
if self._isinstance(label, 'SymbolicLabelBase'):
return self._print_SCALAR_TYPES(label.expr)
else:
return self._render_str(label)
def _braket_fmt(self, expr_type):
"""Return a format string for printing an `expr_type`
ket/bra/ketbra/braket"""
mapping = {
'bra': {
True: '<{label}|^({space})',
'subscript': '<{label}|_({space})',
False: '<{label}|'},
'ket': {
True: '|{label}>^({space})',
'subscript': '|{label}>_({space})',
False: '|{label}>'},
'ketbra': {
True: '|{label_i}><{label_j}|^({space})',
'subscript': '|{label_i}><{label_j}|_({space})',
False: '|{label_i}><{label_j}|'},
'braket': {
True: '<{label_i}|{label_j}>^({space})',
'subscript': '<{label_i}|{label_j}>_({space})',
False: '<{label_i}|{label_j}>'},
}
hs_setting = bool(self._settings['show_hs_label'])
if self._settings['show_hs_label'] == 'subscript':
hs_setting = 'subscript'
return mapping[expr_type][hs_setting]
def _render_op(
self, identifier, hs=None, dagger=False, args=None, superop=False):
"""Render an operator
Args:
identifier (str or SymbolicLabelBase): The identifier (name/symbol)
of the operator. May include a subscript, denoted by '_'.
hs (qnet.algebra.hilbert_space_algebra.HilbertSpace): The Hilbert
space in which the operator is defined
dagger (bool): Whether the operator should be daggered
args (list): A list of expressions that will be rendered with
:meth:`doprint`, joined with commas, enclosed in parenthesis
superop (bool): Whether the operator is a super-operator
"""
hs_label = None
if hs is not None and self._settings['show_hs_label']:
hs_label = self._render_hs_label(hs)
name, total_subscript, total_superscript, args_str \
= self._split_op(identifier, hs_label, dagger, args)
res = name
if len(total_subscript) > 0:
res += "_" + total_subscript
if len(total_superscript) > 0:
res += "^" + total_superscript
if len(args_str) > 0:
res += args_str
return res
def parenthesize(self, expr, level, *args, strict=False, **kwargs):
"""Render `expr` and wrap the result in parentheses if the precedence
of `expr` is below the given `level` (or at the given `level` if
`strict` is True. Extra `args` and `kwargs` are passed to the internal
`doit` renderer"""
needs_parenths = (
(precedence(expr) < level) or
(strict and precedence(expr) == level))
if needs_parenths:
return (
self._parenth_left + self.doprint(expr, *args, **kwargs) +
self._parenth_right)
else:
return self.doprint(expr, *args, **kwargs)
def _print_tuple(self, expr):
return (
self._parenth_left + ", ".join([self.doprint(c) for c in expr])
+ self._parenth_right)
def _print_list(self, expr):
return (
self._bracket_left + ", ".join([self.doprint(c) for c in expr])
+ self._bracket_right)
def _print_CircuitSymbol(self, expr):
res = self._render_str(expr.label)
if len(expr.sym_args) > 0:
res += (
self._parenth_left +
", ".join([self.doprint(arg) for arg in expr.sym_args]) +
self._parenth_right)
return res
def _print_CPermutation(self, expr):
return r'Perm(%s)' % (
", ".join(map(self._render_str, expr.permutation)))
def _print_SeriesProduct(self, expr):
prec = precedence(expr)
circuit_series_sym = " " + self._circuit_series_sym + " "
return circuit_series_sym.join(
[self.parenthesize(op, prec) for op in expr.operands])
    def _print_Concatenation(self, expr):
        """Render a circuit concatenation, collapsing runs of consecutive
        circuit identities into a single ``cid(n)`` placeholder."""
        prec = precedence(expr)
        reduced_operands = []  # reduce consecutive identities to a str
        id_count = 0
        for o in expr.operands:
            if self._isinstance(o, 'CIdentity'):
                id_count += 1
            else:
                # flush any accumulated identities before this operand
                if id_count > 0:
                    reduced_operands.append(self._cid % id_count)
                    id_count = 0
                reduced_operands.append(o)
        # flush a trailing run of identities
        if id_count > 0:
            reduced_operands.append(self._cid % id_count)
        circuit_concat_sym = " " + self._circuit_concat_sym + " "
        parts = []
        for op in reduced_operands:
            if self._isinstance(op, 'SeriesProduct'):
                # while a SeriesProduct has a higher precedence than a
                # Concatenation, for most readers, extra parentheses will be
                # helpful
                # TODO: make this an option
                parts.append(
                    self._parenth_left + self.doprint(op) +
                    self._parenth_right)
            else:
                parts.append(self.parenthesize(op, prec))
        return circuit_concat_sym.join(parts)
def _print_Feedback(self, expr):
o, i = expr.out_in_pair
return '[{operand}]_{{{output}->{input}}}'.format(
operand=self.doprint(expr.operand), output=o, input=i)
def _print_SeriesInverse(self, expr):
return r'[{operand}]^{{-1}}'.format(
operand=self.doprint(expr.operand))
def _print_HilbertSpace(self, expr):
return r'H_{label}'.format(
label=self._render_hs_label(expr))
def _print_ProductSpace(self, expr):
tensor_sym = " " + self._tensor_sym + " "
return tensor_sym.join(
[self.doprint(op) for op in expr.operands])
def _print_OperatorSymbol(self, expr, adjoint=False):
res = self._render_op(expr.label, expr._hs, dagger=adjoint)
if len(expr.sym_args) > 0:
res += (
self._parenth_left +
", ".join([self.doprint(arg) for arg in expr.sym_args]) +
self._parenth_right)
return res
def _print_LocalOperator(self, expr, adjoint=False):
if adjoint:
dagger = not expr._dagger
else:
dagger = expr._dagger
return self._render_op(
expr.identifier, expr._hs, dagger=dagger, args=expr.args)
    def _print_LocalSigma(self, expr, adjoint=False):
        """Render a local transition operator |j><k|, either in ket-bra
        notation (setting 'sig_as_ketbra') or as a subscripted symbol.
        Taking the adjoint swaps the j/k labels."""
        if self._settings['sig_as_ketbra']:
            fmt = self._braket_fmt('ketbra')
            if adjoint:
                return fmt.format(
                    label_i=self._render_state_label(expr.k),
                    label_j=self._render_state_label(expr.j),
                    space=self._render_hs_label(expr.space))
            else:
                return fmt.format(
                    label_i=self._render_state_label(expr.j),
                    label_j=self._render_state_label(expr.k),
                    space=self._render_hs_label(expr.space))
        else:
            if expr.j == expr.k:
                # j == k is a projector; it is self-adjoint, so the
                # `adjoint` flag does not affect the identifier
                identifier = "%s_%s" % (expr._identifier_projector, expr.j)
            else:
                if adjoint:
                    identifier = "%s_%s,%s" % (expr.identifier, expr.k, expr.j)
                else:
                    identifier = "%s_%s,%s" % (expr.identifier, expr.j, expr.k)
            return self._render_op(identifier, expr._hs, dagger=adjoint)
    def _print_IdentityOperator(self, expr):
        # the identity operator prints as the scalar one
        return "1"

    def _print_ZeroOperator(self, expr):
        # the zero operator prints as the scalar zero
        return "0"

    def _print_ScalarValue(self, expr, **kwargs):
        # delegate to the printer for the wrapped value
        return self.doprint(expr.val, **kwargs)

    def _print_Zero(self, expr, **kwargs):
        return "0"

    def _print_One(self, expr, **kwargs):
        return "1"
def _print_ScalarTimesQuantumExpression(self, expr, **kwargs):
prec = PRECEDENCE['Mul']
coeff, term = expr.coeff, expr.term
term_str = self.doprint(term, **kwargs)
if precedence(term) < prec:
term_str = self._parenth_left + term_str + self._parenth_right
if coeff == -1:
if term_str.startswith(self._parenth_left):
return "- " + term_str
else:
return "-" + term_str
if 'adjoint' in kwargs:
coeff_str = self.doprint(coeff, adjoint=kwargs['adjoint'])
else:
coeff_str = self.doprint(coeff)
if term_str in [
'1', self._print_IdentityOperator(expr),
self._print_IdentitySuperOperator]:
return coeff_str
else:
coeff_str = coeff_str.strip()
if precedence(coeff) < prec and precedence(-coeff) < prec:
# the above precedence check catches on only for true sums
coeff_str = (
self._parenth_left + coeff_str + self._parenth_right)
return coeff_str + self._spaced_product_sym + term_str.strip()
    def _print_QuantumPlus(self, expr, adjoint=False, superop=False):
        """Render a sum of quantum expressions, folding each term's leading
        minus into the joining sign and dropping a leading plus."""
        prec = precedence(expr)
        l = []
        kwargs = {}
        if adjoint:
            kwargs['adjoint'] = adjoint
        if superop:
            kwargs['superop'] = superop
        for term in expr.args:
            t = self.doprint(term, **kwargs)
            # pull a leading minus out of the rendered term
            if t.startswith('-'):
                sign = "-"
                t = t[1:].strip()
            else:
                sign = "+"
            if precedence(term) < prec:
                l.extend([sign, self._parenth_left + t + self._parenth_right])
            else:
                l.extend([sign, t])
        try:
            # the very first sign is dropped if it is a plus
            sign = l.pop(0)
            if sign == '+':
                sign = ""
        except IndexError:
            # empty sum: no terms at all
            sign = ""
        return sign + ' '.join(l)
def _print_QuantumTimes(self, expr, **kwargs):
prec = precedence(expr)
return self._spaced_product_sym.join(
[self.parenthesize(op, prec, **kwargs) for op in expr.operands])
def _print_Commutator(self, expr, adjoint=False):
res = "[" + self.doprint(expr.A) + ", " + self.doprint(expr.B) + "]"
if adjoint:
res += "^" + self._dagger_sym
return res
def _print_OperatorTrace(self, expr, adjoint=False):
s = self._render_hs_label(expr._over_space)
kwargs = {}
if adjoint:
kwargs['adjoint'] = adjoint
o = self.doprint(expr.operand, **kwargs)
return r'tr_({space})[{operand}]'.format(space=s, operand=o)
    def _print_Adjoint(self, expr, adjoint=False):
        """Render the adjoint of an operator; the `adjoint` flag (adjoint of
        the adjoint) cancels the dagger where possible."""
        o = expr.operand
        if self._isinstance(o, 'LocalOperator'):
            if adjoint:
                dagger = o._dagger
            else:
                dagger = not o._dagger
            # NOTE(review): args=o.args[1:] drops the first argument of the
            # local operator -- confirm this is intended.
            return self._render_op(
                o.identifier, hs=o.space, dagger=dagger, args=o.args[1:])
        elif self._isinstance(o, 'OperatorSymbol'):
            return self._render_op(
                o.label, hs=o.space, dagger=(not adjoint))
        else:
            if adjoint:
                # adjoint of the adjoint: print the operand unchanged
                return self.doprint(o)
            else:
                return (
                    self._parenth_left + self.doprint(o) +
                    self._parenth_right + "^" + self._dagger_sym)
def _print_OperatorPlusMinusCC(self, expr):
prec = precedence(expr)
o = expr.operand
sign_str = ' + '
if expr._sign < 0:
sign_str = ' - '
return self.parenthesize(o, prec) + sign_str + "c.c."
def _print_PseudoInverse(self, expr):
prec = precedence(expr)
return self.parenthesize(expr.operand, prec) + "^+"
def _print_NullSpaceProjector(self, expr, adjoint=False):
null_space_proj_sym = 'P_Ker'
return self._render_op(
null_space_proj_sym, hs=None, args=expr.operands, dagger=adjoint)
def _print_KetSymbol(self, expr, adjoint=False):
if adjoint:
fmt = self._braket_fmt('bra')
else:
fmt = self._braket_fmt('ket')
label = self._render_state_label(expr.label)
if len(expr.sym_args) > 0:
label += (
self._parenth_left +
", ".join([self.doprint(arg) for arg in expr.sym_args]) +
self._parenth_right)
return fmt.format(
label=label, space=self._render_hs_label(expr.space))
def _print_ZeroKet(self, expr, adjoint=False):
return "0"
def _print_TrivialKet(self, expr, adjoint=False):
return "1"
def _print_CoherentStateKet(self, expr, adjoint=False):
if adjoint:
fmt = self._braket_fmt('bra')
else:
fmt = self._braket_fmt('ket')
label = self._render_state_label('alpha=') + self.doprint(expr._ampl)
space = self._render_hs_label(expr.space)
return fmt.format(label=label, space=space)
    def _print_TensorKet(self, expr, adjoint=False):
        """Render a tensor product of states. A product of plain basis kets
        is combined into a single ket with joined labels; anything else is
        joined with the tensor symbol."""
        if all(self._isinstance(o, 'BasisKet') for o in expr.operands):
            labels = [self._render_state_label(o.label) for o in expr.operands]
            single_letters = all([self._is_single_letter(l) for l in labels])
            try:
                small_hs = all(
                    [(o.space.dimension < 10) for o in expr.operands])
            except BasisNotSetError:
                small_hs = False
            # labels can be joined without separator only when they are
            # unambiguous (single characters in small spaces)
            if small_hs and single_letters:
                joiner = ""
            else:
                joiner = ","
            label = joiner.join(labels)
            fmt = self._braket_fmt('ket')
            if adjoint:
                fmt = self._braket_fmt('bra')
            space = self._render_hs_label(expr.space)
            return fmt.format(label=label, space=space)
        else:
            prec = precedence(expr)
            kwargs = {}
            if adjoint:
                kwargs['adjoint'] = adjoint
            tensor_sym = " %s " % self._tensor_sym
            return tensor_sym.join([
                self.parenthesize(op, prec, **kwargs)
                for op in expr.operands])
def _print_OperatorTimesKet(self, expr, adjoint=False):
prec = precedence(expr)
op, ket = expr.operator, expr.ket
kwargs = {}
if adjoint:
kwargs['adjoint'] = adjoint
rendered_op = self.parenthesize(op, prec, **kwargs)
rendered_ket = self.parenthesize(ket, prec, **kwargs)
if adjoint:
return rendered_ket + " " + rendered_op
else:
return rendered_op + " " + rendered_ket
    def _print_IndexedSum(self, expr, adjoint=False):
        """Render an indexed sum, emitting one sum symbol per group of index
        ranges that share the same upper/lower limits."""
        prec = precedence(expr)
        kwargs = {}
        if adjoint:
            kwargs['adjoint'] = adjoint
        indices = []
        bottom_rhs = None
        top = None
        res = ''
        # ranges with the same limits are grouped into the same sum symbol
        for index_range in expr.ranges:
            current_index = self.doprint(index_range, which='bottom_index')
            current_bottom_rhs = self.doprint(index_range, which='bottom_rhs')
            current_top = self.doprint(index_range, which='top')
            if top is not None:  # index_ranges after the first one
                if current_top != top or current_bottom_rhs != bottom_rhs:
                    # limits changed: flush the accumulated sum symbol
                    res += self._sum_sym
                    bottom = ",".join(indices) + bottom_rhs
                    if len(bottom) > 0:
                        res += '_{%s}' % bottom
                    if len(top) > 0:
                        res += '^{%s}' % top
                    res += " "
                    indices = [current_index, ]
                    top = current_top
                    bottom_rhs = current_bottom_rhs
                else:
                    indices.append(current_index)
            else:  # first range
                indices.append(current_index)
                top = current_top
                bottom_rhs = current_bottom_rhs
        # add the final accumulated sum symbol
        res += self._sum_sym
        bottom = ",".join(indices) + bottom_rhs
        if len(bottom) > 0:
            res += '_{%s}' % bottom
        if len(top) > 0:
            res += '^{%s}' % top
        res += " " + self.parenthesize(expr.term, prec, strict=True, **kwargs)
        return res
def _print_IndexRangeBase(self, expr, which='bottom'):
assert which in ['bottom', 'bottom_index', 'bottom_rhs', 'top']
if which in ['bottom', 'bottom_index']:
return self.doprint(expr.index_symbol)
else:
return ''
def _print_IndexOverFockSpace(self, expr, which='bottom'):
assert which in ['bottom', 'bottom_index', 'bottom_rhs', 'top']
if 'bottom' in which:
bottom_index = self.doprint(expr.index_symbol)
bottom_rhs = " " + self._element_sym + " " + self.doprint(expr.hs)
if which == 'bottom_index':
return bottom_index
elif which == 'bottom_rhs':
return bottom_rhs
else:
return bottom_index + bottom_rhs
elif which == 'top':
return ''
else:
raise ValueError("invalid `which`: %s" % which)
    def _print_IndexOverList(self, expr, which='bottom'):
        """Render an index ranging over an explicit list of values,
        e.g. ``i in {1,2,3}``; `which` selects the part to emit."""
        assert which in ['bottom', 'bottom_index', 'bottom_rhs', 'top']
        if 'bottom' in which:
            bottom_index = self.doprint(expr.index_symbol)
            bottom_rhs = (
                " " + self._element_sym + " " + self._set_delim_left +
                ",".join([self.doprint(val) for val in expr.values]) +
                self._set_delim_right)
            if which == 'bottom_index':
                return bottom_index
            elif which == 'bottom_rhs':
                return bottom_rhs
            else:
                return bottom_index + bottom_rhs
        elif which == 'top':
            # a list-range has no upper limit to print
            return ''
        else:
            raise ValueError("invalid `which`: %s" % which)
def _print_IndexOverRange(self, expr, which='bottom'):
assert which in ['bottom', 'bottom_index', 'bottom_rhs', 'top']
if 'bottom' in which:
bottom_index = self.doprint(expr.index_symbol)
bottom_rhs = "=%s" % expr.start_from
if abs(expr.step) > 1:
bottom_rhs += ", %s" % expr.start_from + expr.step
bottom_rhs += ", " + self._ellipsis
if which == 'bottom_index':
return bottom_index
elif which == 'bottom_rhs':
return bottom_rhs
else:
return bottom_index + bottom_rhs
elif which == 'top':
return str(expr.to)
else:
raise ValueError("invalid `which`: %s" % which)
    def _print_BaseLabel(self, expr):
        # symbolic labels print as their underlying expression
        return self.doprint(expr.expr)

    def _print_Bra(self, expr, adjoint=False):
        # a bra is printed as the adjoint of its underlying ket
        return self.doprint(expr.ket, adjoint=(not adjoint))
    def _print_BraKet(self, expr, adjoint=False):
        """Render an inner product. If both sides expose plain labels
        ("trivial" case), use the braket format string; otherwise fall back
        to rendering bra and ket separately, joined by the product symbol."""
        trivial = True
        try:
            bra_label = self._render_state_label(expr.bra.label)
            bra = expr.bra.ket
            if hasattr(bra, 'sym_args') and len(bra.sym_args) > 0:
                bra_label += (
                    self._parenth_left +
                    ", ".join([self.doprint(arg) for arg in bra.sym_args]) +
                    self._parenth_right)
        except AttributeError:
            # the bra side has no simple label
            trivial = False
        try:
            ket_label = self._render_state_label(expr.ket.label)
            if hasattr(expr.ket, 'sym_args') and len(expr.ket.sym_args) > 0:
                ket_label += (
                    self._parenth_left +
                    ", ".join(
                        [self.doprint(arg) for arg in expr.ket.sym_args]) +
                    self._parenth_right)
        except AttributeError:
            # the ket side has no simple label
            trivial = False
        if trivial:
            fmt = self._braket_fmt('braket')
            # taking the adjoint swaps the two labels
            if adjoint:
                return fmt.format(
                    label_i=ket_label, label_j=bra_label,
                    space=self._render_hs_label(expr.ket.space))
            else:
                return fmt.format(
                    label_i=bra_label, label_j=ket_label,
                    space=self._render_hs_label(expr.ket.space))
        else:
            prec = precedence(expr)
            rendered_bra = self.parenthesize(expr.bra, prec, adjoint=adjoint)
            rendered_ket = self.parenthesize(expr.ket, prec, adjoint=adjoint)
            if adjoint:
                return rendered_ket + self._spaced_product_sym + rendered_bra
            else:
                return rendered_bra + self._spaced_product_sym + rendered_ket
    def _print_KetBra(self, expr, adjoint=False):
        """Render an outer product |i><j|. If both sides expose plain labels
        ("trivial" case), use the ketbra format string; otherwise render the
        two factors separately."""
        trivial = True
        try:
            bra_label = self._render_state_label(expr.bra.label)
            bra = expr.bra.ket
            if hasattr(bra, 'sym_args') and len(bra.sym_args) > 0:
                bra_label += (
                    self._parenth_left +
                    ", ".join([self.doprint(arg) for arg in bra.sym_args]) +
                    self._parenth_right)
        except AttributeError:
            # the bra side has no simple label
            trivial = False
        try:
            ket_label = self._render_state_label(expr.ket.label)
            if hasattr(expr.ket, 'sym_args') and len(expr.ket.sym_args) > 0:
                ket_label += (
                    self._parenth_left +
                    ", ".join(
                        [self.doprint(arg) for arg in expr.ket.sym_args]) +
                    self._parenth_right)
        except AttributeError:
            # the ket side has no simple label
            trivial = False
        if trivial:
            fmt = self._braket_fmt('ketbra')
            # taking the adjoint swaps the two labels
            if adjoint:
                return fmt.format(
                    label_i=bra_label, label_j=ket_label,
                    space=self._render_hs_label(expr.ket.space))
            else:
                return fmt.format(
                    label_i=ket_label, label_j=bra_label,
                    space=self._render_hs_label(expr.ket.space))
        else:
            prec = precedence(expr)
            rendered_bra = self.parenthesize(expr.bra, prec, adjoint=adjoint)
            rendered_ket = self.parenthesize(expr.ket, prec, adjoint=adjoint)
            if adjoint:
                return rendered_bra + rendered_ket
            else:
                return rendered_ket + rendered_bra
def _print_SuperOperatorSymbol(self, expr, adjoint=False, superop=True):
res = self._render_op(
expr.label, expr._hs, dagger=adjoint, superop=True)
if len(expr.sym_args) > 0:
res += (
self._parenth_left +
", ".join([self.doprint(arg) for arg in expr.sym_args]) +
self._parenth_right)
return res
def _print_IdentitySuperOperator(self, expr, superop=True):
return "1"
def _print_ZeroSuperOperator(self, expr, superop=True):
return "0"
def _print_SuperOperatorPlus(self, expr, adjoint=False, superop=True):
return self._print_QuantumPlus(expr, adjoint=adjoint, superop=True)
def _print_SuperOperatorTimes(self, expr, adjoint=False, superop=True):
kwargs = {}
if adjoint:
kwargs['adjoint'] = True
return self._print_QuantumTimes(expr, superop=True, **kwargs)
def _print_SuperAdjoint(self, expr, adjoint=False, superop=True):
o = expr.operand
if self._isinstance(o, 'SuperOperatorSymbol'):
return self._render_op(
o.label, hs=o.space, dagger=(not adjoint), superop=True)
else:
if adjoint:
return self.doprint(o)
else:
return (
self._parenth_left + self.doprint(o) +
self._parenth_right + "^" + self._dagger_sym)
def _print_SPre(self, expr, superop=True):
return (
"SPre" + self._parenth_left + self.doprint(expr.operands[0]) +
self._parenth_right)
def _print_SPost(self, expr, superop=True):
return (
"SPost" + self._parenth_left + self.doprint(expr.operands[0]) +
self._parenth_right)
def _print_SuperOperatorTimesOperator(self, expr):
prec = precedence(expr)
sop, op = expr.sop, expr.op
cs = self.parenthesize(sop, prec)
ct = self.doprint(op)
return "%s[%s]" % (cs, ct)
def _print_QuantumDerivative(self, expr):
res = ""
for sym, n in expr.derivs.items():
sym_str = self.doprint(sym)
if " " in sym_str:
sym_str = "(%s)" % sym_str
if n == 1:
res += "D_%s " % sym_str
else:
res += "D_%s^%s " % (sym_str, n)
res += self.parenthesize(expr.operand, PRECEDENCE['Mul'], strict=True)
if expr.vals:
evaluation_strs = []
for sym, val in expr.vals.items():
evaluation_strs.append(
"%s=%s" % (self.doprint(sym), self.doprint(val)))
res += " |_(%s)" % ", ".join(evaluation_strs)
return res
def _print_Matrix(self, expr):
matrix_left_sym = '['
matrix_right_sym = ']'
matrix_row_left_sym = '['
matrix_row_right_sym = ']'
matrix_col_sep_sym = ', '
matrix_row_sep_sym = ', '
row_strs = []
if len(expr.matrix) == 0:
row_strs.append(matrix_row_left_sym + matrix_row_right_sym)
row_strs.append(matrix_row_left_sym + matrix_row_right_sym)
else:
for row in expr.matrix:
row_strs.append(
matrix_row_left_sym +
matrix_col_sep_sym.join(
[self.doprint(entry) for entry in row]) +
matrix_row_right_sym)
return (
matrix_left_sym + matrix_row_sep_sym.join(row_strs) +
matrix_right_sym)
def _print_Eq(self, expr):
# print for qnet.algebra.toolbox.equality.Eq, but also works for any
# Eq class that has the minimum requirement to have an `lhs` and `rhs`
# attribute
try:
return expr._render_str(renderer=self.doprint)
except AttributeError:
return (self.doprint(expr.lhs) + ' = ' + self.doprint(expr.rhs))
class QnetAsciiDefaultPrinter(QnetAsciiPrinter):
    """ASCII printer with a fixed, settings-free configuration.

    Intended for internal use whenever a well-defined, static
    representation is required (e.g. as a sort key).
    """

    _default_settings = {}

    def __init__(self):
        super().__init__(cache=None, settings=None)
        # Pin the only two options that influence the rendered form.
        self._settings = {'show_hs_label': True, 'sig_as_ketbra': True}
| {
"repo_name": "mabuchilab/QNET",
"path": "src/qnet/printing/asciiprinter.py",
"copies": "1",
"size": "31763",
"license": "mit",
"hash": -1833190490758432300,
"line_mean": 37.925245098,
"line_max": 79,
"alpha_frac": 0.5264301231,
"autogenerated": false,
"ratio": 3.8688185140073084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9894665841889863,
"avg_score": 0.00011655904348919054,
"num_lines": 816
} |
"""AsciiTable end to end testing."""
import sys
from textwrap import dedent
import py
import pytest
from terminaltables import AsciiTable
from terminaltables.terminal_io import IS_WINDOWS
from tests import PROJECT_ROOT
from tests.screenshot import RunNewConsole, screenshot_until_match
HERE = py.path.local(__file__).dirpath()
def test_single_line():
    """Test single-lined cells."""
    rows = [
        ['Name', 'Color', 'Type'],
        ['Avocado', 'green', 'nut'],
        ['Tomato', 'red', 'fruit'],
        ['Lettuce', 'green', 'vegetable'],
        ['Watermelon', 'green'],  # short row: missing cell renders empty
        [],  # empty row renders as a blank line
    ]
    table = AsciiTable(rows, 'Example')
    table.inner_footing_row_border = True
    table.justify_columns[0] = 'left'
    table.justify_columns[1] = 'center'
    table.justify_columns[2] = 'right'
    expected = '\n'.join([
        '+Example-----+-------+-----------+',
        '| Name       | Color |      Type |',
        '+------------+-------+-----------+',
        '| Avocado    | green |       nut |',
        '| Tomato     |  red  |     fruit |',
        '| Lettuce    | green | vegetable |',
        '| Watermelon | green |           |',
        '+------------+-------+-----------+',
        '|            |       |           |',
        '+------------+-------+-----------+',
    ])
    assert table.table == expected
def test_multi_line():
    """Test multi-lined cells."""
    rows = [
        ['Show', 'Characters'],
        ['Rugrats', 'Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles,\nDil Pickles'],
        ['South Park', 'Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick']
    ]
    table = AsciiTable(rows)

    # Rendered lines shared by all three expected tables below.
    border = '+------------+-------------------------------------------------------------------------------------+'
    heading = '| Show       | Characters                                                                          |'
    rugrats_1 = '| Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |'
    rugrats_2 = '|            | Dil Pickles                                                                         |'
    south_park = '| South Park | Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick                          |'

    # Test defaults.
    expected = '\n'.join(
        [border, heading, border, rugrats_1, rugrats_2, south_park, border])
    assert table.table == expected

    # Test inner row border.
    table.inner_row_border = True
    expected = '\n'.join(
        [border, heading, border, rugrats_1, rugrats_2, border, south_park,
         border])
    assert table.table == expected

    # Justify right (output unchanged: the long cells already fill the
    # column, so the rendered table is identical).
    table.justify_columns = {1: 'right'}
    assert table.table == expected
# Legacy string-condition skipif: evaluates the string in the test module's
# namespace; skips the test on non-Windows platforms.
@pytest.mark.skipif(str(not IS_WINDOWS))
@pytest.mark.skip  # https://github.com/Robpol86/terminaltables/issues/44
def test_windows_screenshot(tmpdir):
    """Test on Windows in a new console window. Take a screenshot to verify it works.

    :param tmpdir: pytest fixture.
    """
    script = tmpdir.join('script.py')
    command = [sys.executable, str(script)]
    screenshot = PROJECT_ROOT.join('test_ascii_table.png')
    if screenshot.check():
        # Remove a stale screenshot left over from a previous run.
        screenshot.remove()
    # Generate script.  The %s placeholder below is filled with the
    # screenshot path; the {...} braces are colorclass markup, not
    # str.format fields, so %-formatting is used deliberately.
    script_template = dedent(u"""\
        from __future__ import print_function
        import os, time
        from colorclass import Color, Windows
        from terminaltables import AsciiTable
        Windows.enable(auto_colors=True)
        stop_after = time.time() + 20
        table_data = [
            [Color('{b}Name{/b}'), Color('{b}Color{/b}'), Color('{b}Misc{/b}')],
            ['Avocado', Color('{autogreen}green{/fg}'), 100],
            ['Tomato', Color('{autored}red{/fg}'), 0.5],
            ['Lettuce', Color('{autogreen}green{/fg}'), None],
        ]
        print(AsciiTable(table_data).table)
        print('Waiting for screenshot_until_match()...')
        while not os.path.exists(r'%s') and time.time() < stop_after:
            time.sleep(0.5)
        """)
    script_contents = script_template % str(screenshot)
    script.write(script_contents.encode('utf-8'), mode='wb')
    # Setup expected.
    sub_images = [str(p) for p in HERE.listdir('sub_ascii_*.bmp')]
    assert sub_images
    # Run.  The child console keeps running until the screenshot exists
    # (or the 20s timeout in the generated script elapses).
    with RunNewConsole(command) as gen:
        screenshot_until_match(str(screenshot), 15, sub_images, 1, gen)
| {
"repo_name": "Robpol86/terminaltables",
"path": "tests/test_all_tables_e2e/test_ascii_table.py",
"copies": "1",
"size": "6273",
"license": "mit",
"hash": -8738396087833148000,
"line_mean": 42.2620689655,
"line_max": 120,
"alpha_frac": 0.4147935597,
"autogenerated": false,
"ratio": 4.276073619631902,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016253465733690993,
"num_lines": 145
} |
"""ASCII table generator"""
class ASCIITableRenderer(object):
    """Renders an ``ASCIITable`` into a plain-text string."""

    def render(self, table):
        """Return the full table (header, body rows, footer) as one string."""
        widths = [col.width for col in table.cols]
        total_width = sum(widths)
        # Header: separator, column labels, separator.
        parts = [self._format_separator(total_width)]
        parts.append(
            self._format_row([col.label for col in table.cols], widths))
        parts.append(self._format_separator(total_width))
        # Body: comment pseudo-rows get their own framed layout.
        for row in table.rows:
            if isinstance(row, ASCIITableRowComment):
                parts.append(self._format_row_comment(row.text, total_width))
            else:
                values = [row.dict[key] for key in table.col_keys]
                parts.append(self._format_row(values, widths))
        # Footer.
        parts.append(self._format_separator(total_width))
        return ''.join(parts)

    def _format_separator(self, width):
        """A horizontal rule of ``width`` dashes, newline-terminated."""
        return '-' * width + '\n'

    def _format_row_comment(self, text, width):
        """A comment row framed by dashed lines inside the pipe borders."""
        inner_width = width - 4  # account for the '| ' and ' |' frame
        rule = '| ' + ('-' * inner_width) + ' |\n'
        template = '| {:' + '{:d}'.format(inner_width) + '} |\n'
        return rule + template.format(text) + rule

    def _format_row(self, values, widths):
        """One data row; each cell truncated/padded to its column width."""
        cells = []
        last = len(widths) - 1
        for i, width in enumerate(widths):
            cell_width = width - 3  # '| ' prefix plus trailing space
            if i == last:
                # last column loses one more space for the trailing `|`
                cell_width -= 1
            text = self._trunc(values[i], cell_width)
            template = '| {:' + '{:d}'.format(cell_width) + 's} '
            cells.append(template.format(text))
        return ''.join(cells) + '|\n'

    def _trunc(self, contents, width):
        """Clip ``contents`` to ``width`` chars; ``None`` becomes ''."""
        if contents is None:
            return ''
        if len(contents) <= width:
            return contents
        return contents[:width]
class ASCIITableColumn(object):
    """Column definition: a display label plus a fixed character width."""

    def __init__(self, label, width):
        self.label = label
        self.width = width
class ASCIITableRow(object):
    """A single table row, backed by a column-key -> cell-value mapping."""

    def __init__(self, dict):
        # NOTE: parameter name shadows the ``dict`` builtin; kept unchanged
        # for backward compatibility with keyword callers.
        self.dict = dict
class ASCIITableRowComment(ASCIITableRow):
    """A pseudo-row that the renderer draws as a framed comment line."""

    def __init__(self, text):
        # Comment rows carry no cell data, only the comment text.
        ASCIITableRow.__init__(self, {})
        self.text = text
class ASCIITable(object):
    """Column/row container consumed by :class:`ASCIITableRenderer`.

    All columns must be added before any rows; every row must supply a
    value for exactly the set of registered column keys.
    """

    def __init__(self):
        # list of strings: lookup keys, parallel to ``cols``
        self.col_keys = []
        # list of ASCIITableColumn
        self.cols = []
        # list of ASCIITableRow
        self.rows = []

    def add_column(self, key, col):
        """Register ``col`` under ``key``.

        :raises RuntimeError: if any row has already been added.
        :raises TypeError: if ``col`` is not an ``ASCIITableColumn``.
        """
        if len(self.rows) != 0:
            # RuntimeError replaces the Python-2-only StandardError
            # (removed in Python 3; on Python 2 RuntimeError subclasses
            # StandardError, so existing ``except StandardError`` callers
            # still catch this).
            raise RuntimeError(
                'cannot add columns after rows have been added'
            )
        if not isinstance(col, ASCIITableColumn):
            raise TypeError()
        self.col_keys.append(key)
        self.cols.append(col)

    def add_comment_row(self, text):
        """Append a comment pseudo-row containing ``text``."""
        row = ASCIITableRowComment(text)
        self.rows.append(row)

    def add_row(self, dict):
        """Append a data row; ``dict`` must map every column key to a value.

        :raises ValueError: if a column key is missing or an unknown key
            is present.
        """
        dict_keys = dict.keys()
        expected_keys = self.col_keys
        if set(dict_keys) != set(expected_keys):
            # One of these is guaranteed to raise with a specific message.
            self._find_missing_key(expected_keys, dict_keys)
            self._find_unknown_key(expected_keys, dict_keys)
        row = ASCIITableRow(dict)
        self.rows.append(row)

    def _find_missing_key(self, expected_keys, row_keys):
        """Raise ValueError naming the first expected key absent from the row."""
        for expected_key in expected_keys:
            if expected_key not in row_keys:
                raise ValueError('key `{}` is missing'.format(expected_key))

    def _find_unknown_key(self, expected_keys, row_keys):
        """Raise ValueError naming the first row key that is not a column."""
        for row_key in row_keys:
            if row_key not in expected_keys:
                raise ValueError('key `{}` is not defined'.format(row_key))
| {
"repo_name": "ice-stuff/ice",
"path": "ice/ascii_table.py",
"copies": "1",
"size": "3755",
"license": "mit",
"hash": 4520670069431780400,
"line_mean": 28.3359375,
"line_max": 79,
"alpha_frac": 0.5360852197,
"autogenerated": false,
"ratio": 3.8044579533941234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9840543173094123,
"avg_score": 0,
"num_lines": 128
} |
"""AsciiTable is the main table class. To be inherited by other tables. Define convenience methods here."""
from terminaltables.base_table import BaseTable
from terminaltables.terminal_io import terminal_size
from terminaltables.width_and_alignment import column_max_width, max_dimensions, table_width
class AsciiTable(BaseTable):
    """Draw a table using regular ASCII characters, such as ``+``, ``|``, and ``-``.

    :ivar iter table_data: List (empty or list of lists of strings) representing the table.
    :ivar str title: Optional title to show within the top border of the table.
    :ivar bool inner_column_border: Separates columns.
    :ivar bool inner_footing_row_border: Show a border before the last row.
    :ivar bool inner_heading_row_border: Show a border after the first row.
    :ivar bool inner_row_border: Show a border in between every row.
    :ivar bool outer_border: Show the top, left, right, and bottom border.
    :ivar dict justify_columns: Horizontal justification. Keys are column indexes (int). Values are right/left/center.
    :ivar int padding_left: Number of spaces to pad on the left side of every cell.
    :ivar int padding_right: Number of spaces to pad on the right side of every cell.
    """

    def column_max_width(self, column_number):
        """Return the maximum width of a column based on the current terminal width.

        :param int column_number: The column number to query.

        :return: The max width of the column.
        :rtype: int
        """
        borders_outer = 2 if self.outer_border else 0
        borders_inner = 1 if self.inner_column_border else 0
        cell_padding = self.padding_left + self.padding_right
        widths = max_dimensions(self.table_data)[0]
        return column_max_width(
            widths, column_number, borders_outer, borders_inner, cell_padding)

    @property
    def column_widths(self):
        """Return a list of integers representing the widths of each table column without padding."""
        if self.table_data:
            return max_dimensions(self.table_data)[0]
        return list()

    @property
    def ok(self):  # Too late to change API. # pylint: disable=invalid-name
        """Return True if the table fits within the terminal width, False if the table breaks."""
        return self.table_width <= terminal_size()[0]

    @property
    def table_width(self):
        """Return the width of the table including padding and borders."""
        padded_widths = max_dimensions(
            self.table_data, self.padding_left, self.padding_right)[2]
        borders_outer = 2 if self.outer_border else 0
        borders_inner = 1 if self.inner_column_border else 0
        return table_width(padded_widths, borders_outer, borders_inner)
| {
"repo_name": "Robpol86/terminaltables",
"path": "terminaltables/ascii_table.py",
"copies": "3",
"size": "2734",
"license": "mit",
"hash": -5722899200528905000,
"line_mean": 48.7090909091,
"line_max": 118,
"alpha_frac": 0.6942209217,
"autogenerated": false,
"ratio": 4.068452380952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6262673302652382,
"avg_score": null,
"num_lines": null
} |
import sys
from PDFWriter import PDFWriter
# Define the header information.
# Define the header information.
column_names = ['DEC', 'OCT', 'HEX', 'BIN', 'Symbol', 'Description']
# Fixed character widths used to left-justify each output field.
column_widths = [4, 6, 4, 10, 7, 20]
# Define the ASCII control character information.
# NOTE(review): this data string appears to have suffered extraction
# damage -- a printable-glyph column survives only on the NUL row (the
# mis-encoded character), and the LF and CR rows are split across two
# lines, so those two rows parse incorrectly below.  Verify against the
# original recipe data.
ascii_control_characters = \
"""
0 000 00 00000000 NUL � Null char
1 001 01 00000001 SOH Start of Heading
2 002 02 00000010 STX Start of Text
3 003 03 00000011 ETX End of Text
4 004 04 00000100 EOT End of Transmission
5 005 05 00000101 ENQ Enquiry
6 006 06 00000110 ACK Acknowledgment
7 007 07 00000111 BEL Bell
8 010 08 00001000 BS Back Space
9 011 09 00001001 HT Horizontal Tab
10 012 0A 00001010 LF
Line Feed
11 013 0B 00001011 VT Vertical Tab
12 014 0C 00001100 FF Form Feed
13 015 0D 00001101 CR
Carriage Return
14 016 0E 00001110 SO Shift Out / X-On
15 017 0F 00001111 SI Shift In / X-Off
16 020 10 00010000 DLE Data Line Escape
17 021 11 00010001 DC1 Device Control 1 (oft. XON)
18 022 12 00010010 DC2 Device Control 2
19 023 13 00010011 DC3 Device Control 3 (oft. XOFF)
20 024 14 00010100 DC4 Device Control 4
21 025 15 00010101 NAK Negative Acknowledgement
22 026 16 00010110 SYN Synchronous Idle
23 027 17 00010111 ETB End of Transmit Block
24 030 18 00011000 CAN Cancel
25 031 19 00011001 EM End of Medium
26 032 1A 00011010 SUB Substitute
27 033 1B 00011011 ESC Escape
28 034 1C 00011100 FS File Separator
29 035 1D 00011101 GS Group Separator
30 036 1E 00011110 RS Record Separator
31 037 1F 00011111 US Unit Separator
"""
# Create and set some of the fields of a PDFWriter instance.
pw = PDFWriter("ASCII-Table.pdf")
pw.setFont("Courier", 12)
pw.setHeader("ASCII Control Characters - 0 to 31")
pw.setFooter("Generated by xtopdf: http://slid.es/vasudevram/xtopdf")
# Write the column headings to the output.
column_headings = [ str(val).ljust(column_widths[idx]) \
    for idx, val in enumerate(column_names) ]
pw.writeLine(' '.join(column_headings))
# Split the string into lines, omitting the first and last empty lines.
for line in ascii_control_characters.split('\n')[1:-1]:
    # Split the line into space-delimited fields.
    lis = line.split()
    # Join the words of the Description back into one field,
    # since it was split due to having internal spaces.
    # NOTE(review): lis[6:] skips field index 5, which assumes every data
    # row has a printable-glyph column between "Symbol" and "Description"
    # (visible here only on the NUL row).  With the data string as-is,
    # the first description word of most rows is dropped -- confirm
    # against the original data before changing the index.
    lis2 = lis[0:5] + [' '.join(lis[6:])]
    # Write the column data to the output.
    lis3 = [ str(val).ljust(column_widths[idx]) \
        for idx, val in enumerate(lis2) ]
    pw.writeLine(' '.join(lis3))
pw.close()
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/579043_Printing_an_ASCII_table_to_PDF/recipe-579043.py",
"copies": "1",
"size": "3604",
"license": "mit",
"hash": -6272295966262396000,
"line_mean": 40.8837209302,
"line_max": 76,
"alpha_frac": 0.5755136036,
"autogenerated": false,
"ratio": 3.062925170068027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9107293496993364,
"avg_score": 0.006229055334932671,
"num_lines": 81
} |
# asciixmas
# December 1989 Larry Bartz Indianapolis, IN
#
# $Id: xmas.py 46623 2006-06-03 22:59:23Z andrew.kuchling $
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# Just like the ones I used to know!
# Via a full duplex communications channel,
# At 9600 bits per second,
# Even though it's kinda slow.
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# With ev'ry C program I write!
# May your screen be merry and bright!
# And may all your Christmases be amber or green,
# (for reduced eyestrain and improved visibility)!
#
#
# Notes on the Python version:
# I used a couple of `try...except curses.error' to get around some functions
# returning ERR. The errors come from using wrapping functions to fill
# windows to the last character cell. The C version doesn't have this problem,
# it simply ignores any return values.
#
import curses
import sys
FROMWHO = "Thomas Gellekum <tg@FreeBSD.org>"
def set_color(win, color):
    """Switch *win* to the pair for *color*; no-op on mono terminals."""
    if not curses.has_colors():
        return
    pair = color + 1  # pair 0 is reserved for the default colors
    curses.init_pair(pair, color, my_bg)
    win.attroff(curses.A_COLOR)
    win.attron(curses.color_pair(pair))
def unset_color(win):
    """Reset *win* back to the default color pair (pair 0)."""
    if not curses.has_colors():
        return
    win.attrset(curses.color_pair(0))
def look_out(msecs):
    """Pause *msecs* ms; beep and exit the program if a key was pressed."""
    curses.napms(msecs)
    key = stdscr.getch()
    if key != -1:
        curses.beep()
        sys.exit(0)
def boxit():
    """Draw the static frame: a left rail plus two horizontal rules."""
    for row in range(20):
        stdscr.addch(row, 7, ord('|'))
    for col in range(8, 80):
        stdscr.addch(19, col, ord('_'))
    for col in range(80):
        stdscr.addch(22, col, ord('_'))
def seas():
    """Write "SEASON'S" vertically down column 1, one letter every 2 rows."""
    for row, letter in zip(range(4, 19, 2), "SEASON'S"):
        stdscr.addch(row, 1, ord(letter))
def greet():
    """Write "GREETINGS" vertically down column 5, one letter every 2 rows."""
    for row, letter in zip(range(3, 20, 2), "GREETINGS"):
        stdscr.addch(row, 5, ord(letter))
def fromwho():
    """Print the greeting's sender line beneath the frame."""
    stdscr.addstr(21, 13, FROMWHO)
def tree():
    """Draw the bare tree outline (branches and trunk) in green."""
    set_color(treescrn, curses.COLOR_GREEN)
    # Left edge of the tree, top to bottom.
    for y, x in [(1, 11), (2, 11), (3, 10), (4, 9), (5, 9), (6, 8),
                 (7, 7), (8, 6), (9, 6), (10, 5), (11, 3), (12, 2)]:
        treescrn.addch(y, x, ord('/'))
    # Right edge of the tree, top to bottom.
    for y, x in [(1, 13), (2, 13), (3, 14), (4, 15), (5, 15), (6, 16),
                 (7, 17), (8, 18), (9, 18), (10, 19), (11, 21), (12, 22)]:
        treescrn.addch(y, x, ord('\\'))
    # Branch-tier underlines.
    for y, x in [(4, 10), (4, 14), (8, 7), (8, 17)]:
        treescrn.addch(y, x, ord('_'))
    # Base fringe and trunk.
    treescrn.addstr(13, 0, "//////////// \\\\\\\\\\\\\\\\\\\\\\\\")
    treescrn.addstr(14, 11, "| |")
    treescrn.addstr(15, 11, "|_|")
    unset_color(treescrn)
    treescrn.refresh()
    w_del_msg.refresh()
def balls():
    """Hang ornaments ('@') on the tree, mirrored about the trunk."""
    treescrn.overlay(treescrn2)
    set_color(treescrn2, curses.COLOR_BLUE)
    for y, x in [(3, 9), (3, 15), (4, 8), (4, 16), (5, 7), (5, 17),
                 (7, 6), (7, 18), (8, 5), (8, 19), (10, 4), (10, 20),
                 (11, 2), (11, 22), (12, 1), (12, 23)]:
        treescrn2.addch(y, x, ord('@'))
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def star():
    """Put the bold, blinking star on the treetop."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_YELLOW)
    treescrn2.addch(0, 12, ord('*'))
    treescrn2.standend()
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng1():
    """Draw the first (topmost) string of lights in bold/blinking white."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for y, x, ch in [(3, 13, "'"), (3, 12, ':'), (3, 11, '.')]:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng2():
    """Draw the second string of lights in bold/blinking white."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for y, x, ch in [(5, 14, "'"), (5, 13, ':'), (5, 12, '.'),
                     (5, 11, ','), (6, 10, "'"), (6, 9, ':')]:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng3():
    """Draw the third string of lights in bold/blinking white."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for y, x, ch in [(7, 16, "'"), (7, 15, ':'), (7, 14, '.'),
                     (7, 13, ','), (8, 12, "'"), (8, 11, ':'),
                     (8, 10, '.'), (8, 9, ',')]:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng4():
    """Draw the fourth string of lights in bold/blinking white."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for y, x, ch in [(9, 17, "'"), (9, 16, ':'), (9, 15, '.'),
                     (9, 14, ','), (10, 13, "'"), (10, 12, ':'),
                     (10, 11, '.'), (10, 10, ','), (11, 9, "'"),
                     (11, 8, ':'), (11, 7, '.'), (11, 6, ','),
                     (12, 5, "'")]:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng5():
    """Draw the fifth (lowest) string of lights, then snapshot the lit tree."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for y, x, ch in [(11, 19, "'"), (11, 18, ':'), (11, 17, '.'),
                     (11, 16, ','), (12, 15, "'"), (12, 14, ':'),
                     (12, 13, '.'), (12, 12, ',')]:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    # save a fully lit tree
    treescrn2.overlay(treescrn)
    treescrn2.refresh()
    w_del_msg.refresh()
def blinkit():
    """Blink the tree lights by overlaying a pre-drawn frame, then restore.

    NOTE(review): every branch of the ``for cycle`` loop ends in ``break``,
    so only the ``cycle == 0`` frame (treescrn3) is ever shown before the
    fully-lit tree is restored below; the elif branches are unreachable.
    This mirrors the C original's switch structure -- confirm whether the
    other frames were meant to cycle before changing it.
    """
    treescrn8.touchwin()
    for cycle in range(5):
        if cycle == 0:
            treescrn3.overlay(treescrn8)
            treescrn8.refresh()
            w_del_msg.refresh()
            break
        elif cycle == 1:
            treescrn4.overlay(treescrn8)
            treescrn8.refresh()
            w_del_msg.refresh()
            break
        elif cycle == 2:
            treescrn5.overlay(treescrn8)
            treescrn8.refresh()
            w_del_msg.refresh()
            break
        elif cycle == 3:
            treescrn6.overlay(treescrn8)
            treescrn8.refresh()
            w_del_msg.refresh()
            break
        elif cycle == 4:
            treescrn7.overlay(treescrn8)
            treescrn8.refresh()
            w_del_msg.refresh()
            break
    treescrn8.touchwin()
    # ALL ON
    treescrn.overlay(treescrn8)
    treescrn8.refresh()
    w_del_msg.refresh()
    return
def deer_step(win, y, x):
    """Move *win* to (y, x), repaint, and pause briefly (quits on keypress)."""
    win.mvwin(y, x)
    win.refresh()
    w_del_msg.refresh()
    look_out(5)
def reindeer():
    """Animate the reindeer approaching: distant dots, then stars, then
    small, medium, and finally the big deer walking in and looking around.

    NOTE: each phase deliberately relies on Python's loop-variable
    scoping -- ``x_pos``/``y_pos`` keep their final value after a ``for``
    loop and seed the next phase's range.
    """
    y_pos = 0
    # Phase 1: far away -- a drifting dot.
    for x_pos in range(70, 62, -1):
        if x_pos < 66: y_pos = 1
        for looper in range(0, 4):
            dotdeer0.addch(y_pos, x_pos, ord('.'))
            dotdeer0.refresh()
            w_del_msg.refresh()
            dotdeer0.erase()
            dotdeer0.refresh()
            w_del_msg.refresh()
            look_out(50)
    y_pos = 2
    # Phase 2: closer -- a twinkling star (narrower star window past x=56).
    for x_pos in range(x_pos - 1, 50, -1):
        for looper in range(0, 4):
            if x_pos < 56:
                y_pos = 3
                try:
                    stardeer0.addch(y_pos, x_pos, ord('*'))
                except curses.error:
                    # writing to the last cell wraps; the C original
                    # ignores the same error
                    pass
                stardeer0.refresh()
                w_del_msg.refresh()
                stardeer0.erase()
                stardeer0.refresh()
                w_del_msg.refresh()
            else:
                dotdeer0.addch(y_pos, x_pos, ord('*'))
                dotdeer0.refresh()
                w_del_msg.refresh()
                dotdeer0.erase()
                dotdeer0.refresh()
                w_del_msg.refresh()
    x_pos = 58
    # Phase 3: small deer trotting (frames lildeer1-3 alternate).
    for y_pos in range(2, 5):
        lildeer0.touchwin()
        lildeer0.refresh()
        w_del_msg.refresh()
        for looper in range(0, 4):
            deer_step(lildeer3, y_pos, x_pos)
            deer_step(lildeer2, y_pos, x_pos)
            deer_step(lildeer1, y_pos, x_pos)
            deer_step(lildeer2, y_pos, x_pos)
            deer_step(lildeer3, y_pos, x_pos)
        lildeer0.touchwin()
        lildeer0.refresh()
        w_del_msg.refresh()
        x_pos -= 2
    x_pos = 35
    # Phase 4: medium deer trotting (frames middeer1-3 alternate).
    for y_pos in range(5, 10):
        middeer0.touchwin()
        middeer0.refresh()
        w_del_msg.refresh()
        for looper in range(2):
            deer_step(middeer3, y_pos, x_pos)
            deer_step(middeer2, y_pos, x_pos)
            deer_step(middeer1, y_pos, x_pos)
            deer_step(middeer2, y_pos, x_pos)
            deer_step(middeer3, y_pos, x_pos)
        middeer0.touchwin()
        middeer0.refresh()
        w_del_msg.refresh()
        x_pos -= 3
    look_out(300)
    y_pos = 1
    # Phase 5: big deer walking across the top.
    for x_pos in range(8, 16):
        deer_step(bigdeer4, y_pos, x_pos)
        deer_step(bigdeer3, y_pos, x_pos)
        deer_step(bigdeer2, y_pos, x_pos)
        deer_step(bigdeer1, y_pos, x_pos)
        deer_step(bigdeer2, y_pos, x_pos)
        deer_step(bigdeer3, y_pos, x_pos)
        deer_step(bigdeer4, y_pos, x_pos)
        deer_step(bigdeer0, y_pos, x_pos)
    x_pos -= 1
    # Phase 6: the deer pauses and looks around.
    for looper in range(0, 6):
        deer_step(lookdeer4, y_pos, x_pos)
        deer_step(lookdeer3, y_pos, x_pos)
        deer_step(lookdeer2, y_pos, x_pos)
        deer_step(lookdeer1, y_pos, x_pos)
        deer_step(lookdeer2, y_pos, x_pos)
        deer_step(lookdeer3, y_pos, x_pos)
        deer_step(lookdeer4, y_pos, x_pos)
        deer_step(lookdeer0, y_pos, x_pos)
    # Phase 7: the big deer walks down the screen.
    for y_pos in range(y_pos, 10):
        for looper in range(0, 2):
            deer_step(bigdeer4, y_pos, x_pos)
            deer_step(bigdeer3, y_pos, x_pos)
            deer_step(bigdeer2, y_pos, x_pos)
            deer_step(bigdeer1, y_pos, x_pos)
            deer_step(bigdeer2, y_pos, x_pos)
            deer_step(bigdeer3, y_pos, x_pos)
            deer_step(bigdeer4, y_pos, x_pos)
            deer_step(bigdeer0, y_pos, x_pos)
    y_pos -= 1
    deer_step(lookdeer3, y_pos, x_pos)
    return
def main(win):
    """Build every curses window, then loop the whole show (framed greeting,
    decorated tree, reindeer fly-by, blinking lights) until a key is pressed
    (look_out() exits the program on any keypress)."""
    global stdscr
    stdscr = win
    global my_bg, y_pos, x_pos
    global treescrn, treescrn2, treescrn3, treescrn4
    global treescrn5, treescrn6, treescrn7, treescrn8
    global dotdeer0, stardeer0
    global lildeer0, lildeer1, lildeer2, lildeer3
    global middeer0, middeer1, middeer2, middeer3
    global bigdeer0, bigdeer1, bigdeer2, bigdeer3, bigdeer4
    global lookdeer0, lookdeer1, lookdeer2, lookdeer3, lookdeer4
    global w_holiday, w_del_msg
    my_bg = curses.COLOR_BLACK
    # curses.curs_set(0)
    # Eight identically-placed tree windows: bare/lit tree plus the
    # partially-lit "blink" frames composited by blinkit().
    treescrn = curses.newwin(16, 27, 3, 53)
    treescrn2 = curses.newwin(16, 27, 3, 53)
    treescrn3 = curses.newwin(16, 27, 3, 53)
    treescrn4 = curses.newwin(16, 27, 3, 53)
    treescrn5 = curses.newwin(16, 27, 3, 53)
    treescrn6 = curses.newwin(16, 27, 3, 53)
    treescrn7 = curses.newwin(16, 27, 3, 53)
    treescrn8 = curses.newwin(16, 27, 3, 53)
    # Reindeer sprites at three sizes, plus their backdrop windows.
    dotdeer0 = curses.newwin(3, 71, 0, 8)
    stardeer0 = curses.newwin(4, 56, 0, 8)
    lildeer0 = curses.newwin(7, 53, 0, 8)
    lildeer1 = curses.newwin(2, 4, 0, 0)
    lildeer2 = curses.newwin(2, 4, 0, 0)
    lildeer3 = curses.newwin(2, 4, 0, 0)
    middeer0 = curses.newwin(15, 42, 0, 8)
    middeer1 = curses.newwin(3, 7, 0, 0)
    middeer2 = curses.newwin(3, 7, 0, 0)
    middeer3 = curses.newwin(3, 7, 0, 0)
    bigdeer0 = curses.newwin(10, 23, 0, 0)
    bigdeer1 = curses.newwin(10, 23, 0, 0)
    bigdeer2 = curses.newwin(10, 23, 0, 0)
    bigdeer3 = curses.newwin(10, 23, 0, 0)
    bigdeer4 = curses.newwin(10, 23, 0, 0)
    lookdeer0 = curses.newwin(10, 25, 0, 0)
    lookdeer1 = curses.newwin(10, 25, 0, 0)
    lookdeer2 = curses.newwin(10, 25, 0, 0)
    lookdeer3 = curses.newwin(10, 25, 0, 0)
    lookdeer4 = curses.newwin(10, 25, 0, 0)
    w_holiday = curses.newwin(1, 27, 3, 27)
    w_del_msg = curses.newwin(1, 20, 23, 60)
    # Writing into a window's last cell can raise curses.error even though
    # the text is drawn; ignore it (see the note at the top of the file).
    try:
        w_del_msg.addstr(0, 0, "Hit any key to quit")
    except curses.error:
        pass
    try:
        w_holiday.addstr(0, 0, "H A P P Y H O L I D A Y S")
    except curses.error:
        pass
    # set up the windows for our various reindeer
    lildeer1.addch(0, 0, ord('V'))
    lildeer1.addch(1, 0, ord('@'))
    lildeer1.addch(1, 1, ord('<'))
    lildeer1.addch(1, 2, ord('>'))
    try:
        lildeer1.addch(1, 3, ord('~'))
    except curses.error:
        pass
    lildeer2.addch(0, 0, ord('V'))
    lildeer2.addch(1, 0, ord('@'))
    lildeer2.addch(1, 1, ord('|'))
    lildeer2.addch(1, 2, ord('|'))
    try:
        lildeer2.addch(1, 3, ord('~'))
    except curses.error:
        pass
    lildeer3.addch(0, 0, ord('V'))
    lildeer3.addch(1, 0, ord('@'))
    lildeer3.addch(1, 1, ord('>'))
    lildeer3.addch(1, 2, ord('<'))
    try:
        # Fixed: was lildeer2 (marked XXX) -- the tail was drawn into the
        # wrong window, leaving the lildeer3 frame tailless.
        lildeer3.addch(1, 3, ord('~'))
    except curses.error:
        pass
    middeer1.addch(0, 2, ord('y'))
    middeer1.addch(0, 3, ord('y'))
    middeer1.addch(1, 2, ord('0'))
    middeer1.addch(1, 3, ord('('))
    middeer1.addch(1, 4, ord('='))
    middeer1.addch(1, 5, ord(')'))
    middeer1.addch(1, 6, ord('~'))
    middeer1.addch(2, 3, ord('\\'))
    middeer1.addch(2, 5, ord('/'))
    middeer2.addch(0, 2, ord('y'))
    middeer2.addch(0, 3, ord('y'))
    middeer2.addch(1, 2, ord('0'))
    middeer2.addch(1, 3, ord('('))
    middeer2.addch(1, 4, ord('='))
    middeer2.addch(1, 5, ord(')'))
    middeer2.addch(1, 6, ord('~'))
    middeer2.addch(2, 3, ord('|'))
    middeer2.addch(2, 5, ord('|'))
    middeer3.addch(0, 2, ord('y'))
    middeer3.addch(0, 3, ord('y'))
    middeer3.addch(1, 2, ord('0'))
    middeer3.addch(1, 3, ord('('))
    middeer3.addch(1, 4, ord('='))
    middeer3.addch(1, 5, ord(')'))
    middeer3.addch(1, 6, ord('~'))
    middeer3.addch(2, 3, ord('/'))
    middeer3.addch(2, 5, ord('\\'))
    bigdeer1.addch(0, 17, ord('\\'))
    bigdeer1.addch(0, 18, ord('/'))
    bigdeer1.addch(0, 19, ord('\\'))
    bigdeer1.addch(0, 20, ord('/'))
    bigdeer1.addch(1, 18, ord('\\'))
    bigdeer1.addch(1, 20, ord('/'))
    bigdeer1.addch(2, 19, ord('|'))
    bigdeer1.addch(2, 20, ord('_'))
    bigdeer1.addch(3, 18, ord('/'))
    bigdeer1.addch(3, 19, ord('^'))
    bigdeer1.addch(3, 20, ord('0'))
    bigdeer1.addch(3, 21, ord('\\'))
    bigdeer1.addch(4, 17, ord('/'))
    bigdeer1.addch(4, 18, ord('/'))
    bigdeer1.addch(4, 19, ord('\\'))
    bigdeer1.addch(4, 22, ord('\\'))
    bigdeer1.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer1.addstr(6, 7, "( \\_____( /") # ))
    bigdeer1.addstr(7, 8, "( ) /")
    bigdeer1.addstr(8, 9, "\\\\ /")
    bigdeer1.addstr(9, 11, "\\>/>")
    bigdeer2.addch(0, 17, ord('\\'))
    bigdeer2.addch(0, 18, ord('/'))
    bigdeer2.addch(0, 19, ord('\\'))
    bigdeer2.addch(0, 20, ord('/'))
    bigdeer2.addch(1, 18, ord('\\'))
    bigdeer2.addch(1, 20, ord('/'))
    bigdeer2.addch(2, 19, ord('|'))
    bigdeer2.addch(2, 20, ord('_'))
    bigdeer2.addch(3, 18, ord('/'))
    bigdeer2.addch(3, 19, ord('^'))
    bigdeer2.addch(3, 20, ord('0'))
    bigdeer2.addch(3, 21, ord('\\'))
    bigdeer2.addch(4, 17, ord('/'))
    bigdeer2.addch(4, 18, ord('/'))
    bigdeer2.addch(4, 19, ord('\\'))
    bigdeer2.addch(4, 22, ord('\\'))
    bigdeer2.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer2.addstr(6, 7, "(( )____( /") # ))
    bigdeer2.addstr(7, 7, "( / |")
    bigdeer2.addstr(8, 8, "\\/ |")
    bigdeer2.addstr(9, 9, "|> |>")
    bigdeer3.addch(0, 17, ord('\\'))
    bigdeer3.addch(0, 18, ord('/'))
    bigdeer3.addch(0, 19, ord('\\'))
    bigdeer3.addch(0, 20, ord('/'))
    bigdeer3.addch(1, 18, ord('\\'))
    bigdeer3.addch(1, 20, ord('/'))
    bigdeer3.addch(2, 19, ord('|'))
    bigdeer3.addch(2, 20, ord('_'))
    bigdeer3.addch(3, 18, ord('/'))
    bigdeer3.addch(3, 19, ord('^'))
    bigdeer3.addch(3, 20, ord('0'))
    bigdeer3.addch(3, 21, ord('\\'))
    bigdeer3.addch(4, 17, ord('/'))
    bigdeer3.addch(4, 18, ord('/'))
    bigdeer3.addch(4, 19, ord('\\'))
    bigdeer3.addch(4, 22, ord('\\'))
    bigdeer3.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer3.addstr(6, 6, "( ()_____( /") # ))
    bigdeer3.addstr(7, 6, "/ / /")
    bigdeer3.addstr(8, 5, "|/ \\")
    bigdeer3.addstr(9, 5, "/> \\>")
    bigdeer4.addch(0, 17, ord('\\'))
    bigdeer4.addch(0, 18, ord('/'))
    bigdeer4.addch(0, 19, ord('\\'))
    bigdeer4.addch(0, 20, ord('/'))
    bigdeer4.addch(1, 18, ord('\\'))
    bigdeer4.addch(1, 20, ord('/'))
    bigdeer4.addch(2, 19, ord('|'))
    bigdeer4.addch(2, 20, ord('_'))
    bigdeer4.addch(3, 18, ord('/'))
    bigdeer4.addch(3, 19, ord('^'))
    bigdeer4.addch(3, 20, ord('0'))
    bigdeer4.addch(3, 21, ord('\\'))
    bigdeer4.addch(4, 17, ord('/'))
    bigdeer4.addch(4, 18, ord('/'))
    bigdeer4.addch(4, 19, ord('\\'))
    bigdeer4.addch(4, 22, ord('\\'))
    bigdeer4.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer4.addstr(6, 6, "( )______( /") # )
    bigdeer4.addstr(7, 5, "(/ \\") # )
    bigdeer4.addstr(8, 0, "v___= ----^")
    lookdeer1.addstr(0, 16, "\\/ \\/")
    lookdeer1.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer1.addstr(2, 19, "\\=/")
    lookdeer1.addstr(3, 17, "^\\o o/^")
    lookdeer1.addstr(4, 17, "//( )")
    lookdeer1.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer1.addstr(6, 7, "( \\_____( /") # ))
    lookdeer1.addstr(7, 8, "( ) /")
    lookdeer1.addstr(8, 9, "\\\\ /")
    lookdeer1.addstr(9, 11, "\\>/>")
    lookdeer2.addstr(0, 16, "\\/ \\/")
    lookdeer2.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer2.addstr(2, 19, "\\=/")
    lookdeer2.addstr(3, 17, "^\\o o/^")
    lookdeer2.addstr(4, 17, "//( )")
    lookdeer2.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer2.addstr(6, 7, "(( )____( /") # ))
    lookdeer2.addstr(7, 7, "( / |")
    lookdeer2.addstr(8, 8, "\\/ |")
    lookdeer2.addstr(9, 9, "|> |>")
    lookdeer3.addstr(0, 16, "\\/ \\/")
    lookdeer3.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer3.addstr(2, 19, "\\=/")
    lookdeer3.addstr(3, 17, "^\\o o/^")
    lookdeer3.addstr(4, 17, "//( )")
    lookdeer3.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer3.addstr(6, 6, "( ()_____( /") # ))
    lookdeer3.addstr(7, 6, "/ / /")
    lookdeer3.addstr(8, 5, "|/ \\")
    lookdeer3.addstr(9, 5, "/> \\>")
    lookdeer4.addstr(0, 16, "\\/ \\/")
    lookdeer4.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer4.addstr(2, 19, "\\=/")
    lookdeer4.addstr(3, 17, "^\\o o/^")
    lookdeer4.addstr(4, 17, "//( )")
    lookdeer4.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer4.addstr(6, 6, "( )______( /") # )
    lookdeer4.addstr(7, 5, "(/ \\") # )
    lookdeer4.addstr(8, 0, "v___= ----^")
    ###############################################
    curses.cbreak()
    stdscr.nodelay(1)
    # Main show loop; repeats until look_out() detects a keypress and exits.
    while 1:
        stdscr.clear()
        treescrn.erase()
        w_del_msg.touchwin()
        treescrn.touchwin()
        treescrn2.erase()
        treescrn2.touchwin()
        treescrn8.erase()
        treescrn8.touchwin()
        stdscr.refresh()
        look_out(150)
        boxit()
        stdscr.refresh()
        look_out(150)
        seas()
        stdscr.refresh()
        greet()
        stdscr.refresh()
        look_out(150)
        fromwho()
        stdscr.refresh()
        look_out(150)
        tree()
        look_out(150)
        balls()
        look_out(150)
        star()
        look_out(150)
        strng1()
        strng2()
        strng3()
        strng4()
        strng5()
        # set up the windows for our blinking trees
        #
        # treescrn3
        treescrn.overlay(treescrn3)
        # balls
        treescrn3.addch(4, 18, ord(' '))
        treescrn3.addch(7, 6, ord(' '))
        treescrn3.addch(8, 19, ord(' '))
        treescrn3.addch(11, 22, ord(' '))
        # star
        treescrn3.addch(0, 12, ord('*'))
        # strng1
        treescrn3.addch(3, 11, ord(' '))
        # strng2
        treescrn3.addch(5, 13, ord(' '))
        treescrn3.addch(6, 10, ord(' '))
        # strng3
        treescrn3.addch(7, 16, ord(' '))
        treescrn3.addch(7, 14, ord(' '))
        # strng4
        treescrn3.addch(10, 13, ord(' '))
        treescrn3.addch(10, 10, ord(' '))
        treescrn3.addch(11, 8, ord(' '))
        # strng5
        treescrn3.addch(11, 18, ord(' '))
        treescrn3.addch(12, 13, ord(' '))
        # treescrn4
        treescrn.overlay(treescrn4)
        # balls
        treescrn4.addch(3, 9, ord(' '))
        treescrn4.addch(4, 16, ord(' '))
        treescrn4.addch(7, 6, ord(' '))
        treescrn4.addch(8, 19, ord(' '))
        treescrn4.addch(11, 2, ord(' '))
        treescrn4.addch(12, 23, ord(' '))
        # star
        treescrn4.standout()
        treescrn4.addch(0, 12, ord('*'))
        treescrn4.standend()
        # strng1
        treescrn4.addch(3, 13, ord(' '))
        # strng2
        # strng3
        treescrn4.addch(7, 15, ord(' '))
        treescrn4.addch(8, 11, ord(' '))
        # strng4
        treescrn4.addch(9, 16, ord(' '))
        treescrn4.addch(10, 12, ord(' '))
        treescrn4.addch(11, 8, ord(' '))
        # strng5
        treescrn4.addch(11, 18, ord(' '))
        treescrn4.addch(12, 14, ord(' '))
        # treescrn5
        treescrn.overlay(treescrn5)
        # balls
        treescrn5.addch(3, 15, ord(' '))
        treescrn5.addch(10, 20, ord(' '))
        treescrn5.addch(12, 1, ord(' '))
        # star
        treescrn5.addch(0, 12, ord(' '))
        # strng1
        treescrn5.addch(3, 11, ord(' '))
        # strng2
        treescrn5.addch(5, 12, ord(' '))
        # strng3
        treescrn5.addch(7, 14, ord(' '))
        treescrn5.addch(8, 10, ord(' '))
        # strng4
        treescrn5.addch(9, 15, ord(' '))
        treescrn5.addch(10, 11, ord(' '))
        treescrn5.addch(11, 7, ord(' '))
        # strng5
        treescrn5.addch(11, 17, ord(' '))
        treescrn5.addch(12, 13, ord(' '))
        # treescrn6
        treescrn.overlay(treescrn6)
        # balls
        treescrn6.addch(6, 7, ord(' '))
        treescrn6.addch(7, 18, ord(' '))
        treescrn6.addch(10, 4, ord(' '))
        treescrn6.addch(11, 23, ord(' '))
        # star
        treescrn6.standout()
        treescrn6.addch(0, 12, ord('*'))
        treescrn6.standend()
        # strng1
        # strng2
        treescrn6.addch(5, 11, ord(' '))
        # strng3
        treescrn6.addch(7, 13, ord(' '))
        treescrn6.addch(8, 9, ord(' '))
        # strng4
        treescrn6.addch(9, 14, ord(' '))
        treescrn6.addch(10, 10, ord(' '))
        treescrn6.addch(11, 6, ord(' '))
        # strng5
        treescrn6.addch(11, 16, ord(' '))
        treescrn6.addch(12, 12, ord(' '))
        # treescrn7
        treescrn.overlay(treescrn7)
        # balls
        treescrn7.addch(3, 15, ord(' '))
        treescrn7.addch(6, 7, ord(' '))
        treescrn7.addch(7, 18, ord(' '))
        treescrn7.addch(10, 4, ord(' '))
        treescrn7.addch(11, 22, ord(' '))
        # star
        treescrn7.addch(0, 12, ord('*'))
        # strng1
        treescrn7.addch(3, 12, ord(' '))
        # strng2
        treescrn7.addch(5, 13, ord(' '))
        treescrn7.addch(6, 9, ord(' '))
        # strng3
        treescrn7.addch(7, 15, ord(' '))
        treescrn7.addch(8, 11, ord(' '))
        # strng4
        treescrn7.addch(9, 16, ord(' '))
        treescrn7.addch(10, 12, ord(' '))
        treescrn7.addch(11, 8, ord(' '))
        # strng5
        treescrn7.addch(11, 18, ord(' '))
        treescrn7.addch(12, 14, ord(' '))
        look_out(150)
        reindeer()
        w_holiday.touchwin()
        w_holiday.refresh()
        w_del_msg.refresh()
        look_out(500)
        for i in range(0, 20):
            blinkit()
# Run under curses.wrapper so the terminal is restored even if main() raises.
curses.wrapper(main)
| {
"repo_name": "yqm/sl4a",
"path": "python/src/Demo/curses/xmas.py",
"copies": "34",
"size": "25499",
"license": "apache-2.0",
"hash": -3236774364321500000,
"line_mean": 27.1445916115,
"line_max": 78,
"alpha_frac": 0.5234715087,
"autogenerated": false,
"ratio": 2.5916251651590607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# asciixmas
# December 1989 Larry Bartz Indianapolis, IN
#
# $Id: xmas.py,v 1.1 2000/12/21 16:26:37 akuchling Exp $
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# Just like the ones I used to know!
# Via a full duplex communications channel,
# At 9600 bits per second,
# Even though it's kinda slow.
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# With ev'ry C program I write!
# May your screen be merry and bright!
# And may all your Christmases be amber or green,
# (for reduced eyestrain and improved visibility)!
#
#
# Notes on the Python version:
# I used a couple of `try...except curses.error' to get around some functions
# returning ERR. The errors come from using wrapping functions to fill
# windows to the last character cell. The C version doesn't have this problem,
# it simply ignores any return values.
#
import curses
import sys
# Signature line displayed near the bottom of the screen by fromwho().
FROMWHO = "Thomas Gellekum <tg@FreeBSD.org>"
def set_color(win, color):
    """Switch *win* to draw in *color* on the global background color;
    a no-op on terminals without color support."""
    if not curses.has_colors():
        return
    pair = color + 1
    curses.init_pair(pair, color, my_bg)
    win.attroff(curses.A_COLOR)
    win.attron(curses.color_pair(pair))
def unset_color(win):
    """Restore *win* to the default color pair; a no-op on mono terminals."""
    if not curses.has_colors():
        return
    win.attrset(curses.color_pair(0))
def look_out(msecs):
    """Pause for *msecs* milliseconds; if any key was pressed meanwhile,
    beep and terminate the program."""
    curses.napms(msecs)
    if stdscr.getch() == -1:
        return
    curses.beep()
    sys.exit(0)
def boxit():
    """Draw the frame around the greeting: a vertical bar down column 7
    and two horizontal rules at rows 19 and 22."""
    for row in range(20):
        stdscr.addch(row, 7, ord('|'))
    for col in range(8, 80):
        stdscr.addch(19, col, ord('_'))
    for col in range(80):
        stdscr.addch(22, col, ord('_'))
    return
def seas():
    """Spell SEASON'S vertically down column 1, one letter every other row."""
    for offset, letter in enumerate("SEASON'S"):
        stdscr.addch(4 + 2 * offset, 1, ord(letter))
    return
def greet():
    """Spell GREETINGS vertically down column 5, one letter every other row."""
    for offset, letter in enumerate("GREETINGS"):
        stdscr.addch(3 + 2 * offset, 5, ord(letter))
    return
def fromwho():
    """Write the signature line (FROMWHO) beneath the framed greeting."""
    stdscr.addstr(21, 13, FROMWHO)
    return
def tree():
    """Draw the bare tree outline into treescrn, in green."""
    set_color(treescrn, curses.COLOR_GREEN)
    # Left edge of the tree, top to bottom.
    for row, col in ((1, 11), (2, 11), (3, 10), (4, 9), (5, 9), (6, 8),
                     (7, 7), (8, 6), (9, 6), (10, 5), (11, 3), (12, 2)):
        treescrn.addch(row, col, ord('/'))
    # Right edge, top to bottom.
    for row, col in ((1, 13), (2, 13), (3, 14), (4, 15), (5, 15), (6, 16),
                     (7, 17), (8, 18), (9, 18), (10, 19), (11, 21), (12, 22)):
        treescrn.addch(row, col, ord('\\'))
    # Ledges where the outline steps outward.
    for row, col in ((4, 10), (4, 14), (8, 7), (8, 17)):
        treescrn.addch(row, col, ord('_'))
    # Skirt and trunk.
    treescrn.addstr(13, 0, "//////////// \\\\\\\\\\\\\\\\\\\\\\\\")
    treescrn.addstr(14, 11, "| |")
    treescrn.addstr(15, 11, "|_|")
    unset_color(treescrn)
    treescrn.refresh()
    w_del_msg.refresh()
    return
def balls():
    """Hang the '@' ornaments onto a copy of the bare tree (treescrn2),
    in blue."""
    treescrn.overlay(treescrn2)
    set_color(treescrn2, curses.COLOR_BLUE)
    for row, col in ((3, 9), (3, 15), (4, 8), (4, 16), (5, 7), (5, 17),
                     (7, 6), (7, 18), (8, 5), (8, 19), (10, 4), (10, 20),
                     (11, 2), (11, 22), (12, 1), (12, 23)):
        treescrn2.addch(row, col, ord('@'))
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def star():
    """Put the bold, blinking yellow star on top of the tree."""
    win = treescrn2
    win.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(win, curses.COLOR_YELLOW)
    win.addch(0, 12, ord('*'))
    win.standend()
    unset_color(win)
    win.refresh()
    w_del_msg.refresh()
    return
def strng1():
    """Draw the topmost string of lights (row 3), bold + blinking white."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for col, ch in ((13, "'"), (12, ':'), (11, '.')):
        treescrn2.addch(3, col, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def strng2():
    """Draw the second string of lights (rows 5-6), bold + blinking white."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for row, col, ch in ((5, 14, "'"), (5, 13, ':'), (5, 12, '.'),
                         (5, 11, ','), (6, 10, "'"), (6, 9, ':')):
        treescrn2.addch(row, col, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def strng3():
    """Draw the third string of lights (rows 7-8), bold + blinking white."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for row, col, ch in ((7, 16, "'"), (7, 15, ':'), (7, 14, '.'),
                         (7, 13, ','), (8, 12, "'"), (8, 11, ':'),
                         (8, 10, '.'), (8, 9, ',')):
        treescrn2.addch(row, col, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def strng4():
    """Draw the fourth string of lights (rows 9-12), bold + blinking white."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for row, col, ch in ((9, 17, "'"), (9, 16, ':'), (9, 15, '.'),
                         (9, 14, ','), (10, 13, "'"), (10, 12, ':'),
                         (10, 11, '.'), (10, 10, ','), (11, 9, "'"),
                         (11, 8, ':'), (11, 7, '.'), (11, 6, ','),
                         (12, 5, "'")):
        treescrn2.addch(row, col, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def strng5():
    """Draw the bottom string of lights (rows 11-12), then snapshot the
    fully lit tree back into treescrn for blinkit() to restore from."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for row, col, ch in ((11, 19, "'"), (11, 18, ':'), (11, 17, '.'),
                         (11, 16, ','), (12, 15, "'"), (12, 14, ':'),
                         (12, 13, '.'), (12, 12, ',')):
        treescrn2.addch(row, col, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    # save a fully lit tree
    treescrn2.overlay(treescrn)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def blinkit():
    """Flash the tree lights once: overlay one of the five partially-lit
    frames (treescrn3..treescrn7) onto treescrn8, then restore the fully
    lit tree.

    The original `for cycle in range(0, 5)` loop hit `break` in its first
    branch every call, so `cycle` was always 0 and only treescrn3 was ever
    shown. A persistent counter (mirroring the C original's `static int
    cycle`) now rotates through all five frames on successive calls.
    """
    treescrn8.touchwin()
    frames = (treescrn3, treescrn4, treescrn5, treescrn6, treescrn7)
    cycle = getattr(blinkit, '_cycle', 0)
    frames[cycle].overlay(treescrn8)
    blinkit._cycle = (cycle + 1) % len(frames)
    treescrn8.refresh()
    w_del_msg.refresh()
    treescrn8.touchwin()
    # ALL ON
    treescrn.overlay(treescrn8)
    treescrn8.refresh()
    w_del_msg.refresh()
    return
def deer_step(win, y, x):
    """Advance one animation frame: move *win* to (y, x), repaint it, and
    pause for 5 ms (look_out also exits the program on any keypress)."""
    win.mvwin(y, x)
    win.refresh()
    w_del_msg.refresh()
    look_out(5)
def reindeer():
    """Animate the reindeer's approach: first a distant dot, then a star,
    then small, medium and finally big sprites galloping in to land."""
    y_pos = 0
    # Far away: a twinkling '.' descending from the upper right.
    for x_pos in range(70, 62, -1):
        if x_pos < 66: y_pos = 1
        for looper in range(0, 4):
            dotdeer0.addch(y_pos, x_pos, ord('.'))
            dotdeer0.refresh()
            w_del_msg.refresh()
            dotdeer0.erase()
            dotdeer0.refresh()
            w_del_msg.refresh()
            look_out(50)
    y_pos = 2
    # Closer: a '*'. This deliberately continues from the previous loop's
    # final x_pos (the for-loop variable leaks out of the loop).
    for x_pos in range(x_pos - 1, 50, -1):
        for looper in range(0, 4):
            if x_pos < 56:
                y_pos = 3
                try:
                    stardeer0.addch(y_pos, x_pos, ord('*'))
                except curses.error:
                    pass
                stardeer0.refresh()
                w_del_msg.refresh()
                stardeer0.erase()
                stardeer0.refresh()
                w_del_msg.refresh()
            else:
                dotdeer0.addch(y_pos, x_pos, ord('*'))
                dotdeer0.refresh()
                w_del_msg.refresh()
                dotdeer0.erase()
                dotdeer0.refresh()
                w_del_msg.refresh()
    x_pos = 58
    # Small deer galloping down and to the left (frames lildeer1..3).
    for y_pos in range(2, 5):
        lildeer0.touchwin()
        lildeer0.refresh()
        w_del_msg.refresh()
        for looper in range(0, 4):
            deer_step(lildeer3, y_pos, x_pos)
            deer_step(lildeer2, y_pos, x_pos)
            deer_step(lildeer1, y_pos, x_pos)
            deer_step(lildeer2, y_pos, x_pos)
            deer_step(lildeer3, y_pos, x_pos)
        lildeer0.touchwin()
        lildeer0.refresh()
        w_del_msg.refresh()
        x_pos -= 2
    x_pos = 35
    # Medium deer (frames middeer1..3).
    for y_pos in range(5, 10):
        middeer0.touchwin()
        middeer0.refresh()
        w_del_msg.refresh()
        for looper in range(0, 2):
            deer_step(middeer3, y_pos, x_pos)
            deer_step(middeer2, y_pos, x_pos)
            deer_step(middeer1, y_pos, x_pos)
            deer_step(middeer2, y_pos, x_pos)
            deer_step(middeer3, y_pos, x_pos)
        middeer0.touchwin()
        middeer0.refresh()
        w_del_msg.refresh()
        x_pos -= 3
    look_out(300)
    y_pos = 1
    # Big deer galloping in from the left (frames bigdeer1..4)...
    for x_pos in range(8, 16):
        deer_step(bigdeer4, y_pos, x_pos)
        deer_step(bigdeer3, y_pos, x_pos)
        deer_step(bigdeer2, y_pos, x_pos)
        deer_step(bigdeer1, y_pos, x_pos)
        deer_step(bigdeer2, y_pos, x_pos)
        deer_step(bigdeer3, y_pos, x_pos)
        deer_step(bigdeer4, y_pos, x_pos)
    deer_step(bigdeer0, y_pos, x_pos)
    x_pos -= 1
    # ...pausing to look around (lookdeer frames)...
    for looper in range(0, 6):
        deer_step(lookdeer4, y_pos, x_pos)
        deer_step(lookdeer3, y_pos, x_pos)
        deer_step(lookdeer2, y_pos, x_pos)
        deer_step(lookdeer1, y_pos, x_pos)
        deer_step(lookdeer2, y_pos, x_pos)
        deer_step(lookdeer3, y_pos, x_pos)
        deer_step(lookdeer4, y_pos, x_pos)
    deer_step(lookdeer0, y_pos, x_pos)
    # ...then dropping row by row to land beside the tree.
    for y_pos in range(y_pos, 10):
        for looper in range(0, 2):
            deer_step(bigdeer4, y_pos, x_pos)
            deer_step(bigdeer3, y_pos, x_pos)
            deer_step(bigdeer2, y_pos, x_pos)
            deer_step(bigdeer1, y_pos, x_pos)
            deer_step(bigdeer2, y_pos, x_pos)
            deer_step(bigdeer3, y_pos, x_pos)
            deer_step(bigdeer4, y_pos, x_pos)
        deer_step(bigdeer0, y_pos, x_pos)
    y_pos -= 1
    deer_step(lookdeer3, y_pos, x_pos)
    return
def main(win):
    """Build every curses window, then loop the whole show (framed greeting,
    decorated tree, reindeer fly-by, blinking lights) until a key is pressed
    (look_out() exits the program on any keypress)."""
    global stdscr
    stdscr = win
    global my_bg, y_pos, x_pos
    global treescrn, treescrn2, treescrn3, treescrn4
    global treescrn5, treescrn6, treescrn7, treescrn8
    global dotdeer0, stardeer0
    global lildeer0, lildeer1, lildeer2, lildeer3
    global middeer0, middeer1, middeer2, middeer3
    global bigdeer0, bigdeer1, bigdeer2, bigdeer3, bigdeer4
    global lookdeer0, lookdeer1, lookdeer2, lookdeer3, lookdeer4
    global w_holiday, w_del_msg
    my_bg = curses.COLOR_BLACK
    # curses.curs_set(0)
    # Eight identically-placed tree windows: bare/lit tree plus the
    # partially-lit "blink" frames composited by blinkit().
    treescrn = curses.newwin(16, 27, 3, 53)
    treescrn2 = curses.newwin(16, 27, 3, 53)
    treescrn3 = curses.newwin(16, 27, 3, 53)
    treescrn4 = curses.newwin(16, 27, 3, 53)
    treescrn5 = curses.newwin(16, 27, 3, 53)
    treescrn6 = curses.newwin(16, 27, 3, 53)
    treescrn7 = curses.newwin(16, 27, 3, 53)
    treescrn8 = curses.newwin(16, 27, 3, 53)
    # Reindeer sprites at three sizes, plus their backdrop windows.
    dotdeer0 = curses.newwin(3, 71, 0, 8)
    stardeer0 = curses.newwin(4, 56, 0, 8)
    lildeer0 = curses.newwin(7, 53, 0, 8)
    lildeer1 = curses.newwin(2, 4, 0, 0)
    lildeer2 = curses.newwin(2, 4, 0, 0)
    lildeer3 = curses.newwin(2, 4, 0, 0)
    middeer0 = curses.newwin(15, 42, 0, 8)
    middeer1 = curses.newwin(3, 7, 0, 0)
    middeer2 = curses.newwin(3, 7, 0, 0)
    middeer3 = curses.newwin(3, 7, 0, 0)
    bigdeer0 = curses.newwin(10, 23, 0, 0)
    bigdeer1 = curses.newwin(10, 23, 0, 0)
    bigdeer2 = curses.newwin(10, 23, 0, 0)
    bigdeer3 = curses.newwin(10, 23, 0, 0)
    bigdeer4 = curses.newwin(10, 23, 0, 0)
    lookdeer0 = curses.newwin(10, 25, 0, 0)
    lookdeer1 = curses.newwin(10, 25, 0, 0)
    lookdeer2 = curses.newwin(10, 25, 0, 0)
    lookdeer3 = curses.newwin(10, 25, 0, 0)
    lookdeer4 = curses.newwin(10, 25, 0, 0)
    w_holiday = curses.newwin(1, 27, 3, 27)
    w_del_msg = curses.newwin(1, 20, 23, 60)
    # Writing into a window's last cell can raise curses.error even though
    # the text is drawn; ignore it (see the note at the top of the file).
    try:
        w_del_msg.addstr(0, 0, "Hit any key to quit")
    except curses.error:
        pass
    try:
        w_holiday.addstr(0, 0, "H A P P Y H O L I D A Y S")
    except curses.error:
        pass
    # set up the windows for our various reindeer
    lildeer1.addch(0, 0, ord('V'))
    lildeer1.addch(1, 0, ord('@'))
    lildeer1.addch(1, 1, ord('<'))
    lildeer1.addch(1, 2, ord('>'))
    try:
        lildeer1.addch(1, 3, ord('~'))
    except curses.error:
        pass
    lildeer2.addch(0, 0, ord('V'))
    lildeer2.addch(1, 0, ord('@'))
    lildeer2.addch(1, 1, ord('|'))
    lildeer2.addch(1, 2, ord('|'))
    try:
        lildeer2.addch(1, 3, ord('~'))
    except curses.error:
        pass
    lildeer3.addch(0, 0, ord('V'))
    lildeer3.addch(1, 0, ord('@'))
    lildeer3.addch(1, 1, ord('>'))
    lildeer3.addch(1, 2, ord('<'))
    try:
        # Fixed: was lildeer2 (marked XXX) -- the tail was drawn into the
        # wrong window, leaving the lildeer3 frame tailless.
        lildeer3.addch(1, 3, ord('~'))
    except curses.error:
        pass
    middeer1.addch(0, 2, ord('y'))
    middeer1.addch(0, 3, ord('y'))
    middeer1.addch(1, 2, ord('0'))
    middeer1.addch(1, 3, ord('('))
    middeer1.addch(1, 4, ord('='))
    middeer1.addch(1, 5, ord(')'))
    middeer1.addch(1, 6, ord('~'))
    middeer1.addch(2, 3, ord('\\'))
    middeer1.addch(2, 5, ord('/'))
    middeer2.addch(0, 2, ord('y'))
    middeer2.addch(0, 3, ord('y'))
    middeer2.addch(1, 2, ord('0'))
    middeer2.addch(1, 3, ord('('))
    middeer2.addch(1, 4, ord('='))
    middeer2.addch(1, 5, ord(')'))
    middeer2.addch(1, 6, ord('~'))
    middeer2.addch(2, 3, ord('|'))
    middeer2.addch(2, 5, ord('|'))
    middeer3.addch(0, 2, ord('y'))
    middeer3.addch(0, 3, ord('y'))
    middeer3.addch(1, 2, ord('0'))
    middeer3.addch(1, 3, ord('('))
    middeer3.addch(1, 4, ord('='))
    middeer3.addch(1, 5, ord(')'))
    middeer3.addch(1, 6, ord('~'))
    middeer3.addch(2, 3, ord('/'))
    middeer3.addch(2, 5, ord('\\'))
    bigdeer1.addch(0, 17, ord('\\'))
    bigdeer1.addch(0, 18, ord('/'))
    bigdeer1.addch(0, 19, ord('\\'))
    bigdeer1.addch(0, 20, ord('/'))
    bigdeer1.addch(1, 18, ord('\\'))
    bigdeer1.addch(1, 20, ord('/'))
    bigdeer1.addch(2, 19, ord('|'))
    bigdeer1.addch(2, 20, ord('_'))
    bigdeer1.addch(3, 18, ord('/'))
    bigdeer1.addch(3, 19, ord('^'))
    bigdeer1.addch(3, 20, ord('0'))
    bigdeer1.addch(3, 21, ord('\\'))
    bigdeer1.addch(4, 17, ord('/'))
    bigdeer1.addch(4, 18, ord('/'))
    bigdeer1.addch(4, 19, ord('\\'))
    bigdeer1.addch(4, 22, ord('\\'))
    bigdeer1.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer1.addstr(6, 7, "( \\_____( /") # ))
    bigdeer1.addstr(7, 8, "( ) /")
    bigdeer1.addstr(8, 9, "\\\\ /")
    bigdeer1.addstr(9, 11, "\\>/>")
    bigdeer2.addch(0, 17, ord('\\'))
    bigdeer2.addch(0, 18, ord('/'))
    bigdeer2.addch(0, 19, ord('\\'))
    bigdeer2.addch(0, 20, ord('/'))
    bigdeer2.addch(1, 18, ord('\\'))
    bigdeer2.addch(1, 20, ord('/'))
    bigdeer2.addch(2, 19, ord('|'))
    bigdeer2.addch(2, 20, ord('_'))
    bigdeer2.addch(3, 18, ord('/'))
    bigdeer2.addch(3, 19, ord('^'))
    bigdeer2.addch(3, 20, ord('0'))
    bigdeer2.addch(3, 21, ord('\\'))
    bigdeer2.addch(4, 17, ord('/'))
    bigdeer2.addch(4, 18, ord('/'))
    bigdeer2.addch(4, 19, ord('\\'))
    bigdeer2.addch(4, 22, ord('\\'))
    bigdeer2.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer2.addstr(6, 7, "(( )____( /") # ))
    bigdeer2.addstr(7, 7, "( / |")
    bigdeer2.addstr(8, 8, "\\/ |")
    bigdeer2.addstr(9, 9, "|> |>")
    bigdeer3.addch(0, 17, ord('\\'))
    bigdeer3.addch(0, 18, ord('/'))
    bigdeer3.addch(0, 19, ord('\\'))
    bigdeer3.addch(0, 20, ord('/'))
    bigdeer3.addch(1, 18, ord('\\'))
    bigdeer3.addch(1, 20, ord('/'))
    bigdeer3.addch(2, 19, ord('|'))
    bigdeer3.addch(2, 20, ord('_'))
    bigdeer3.addch(3, 18, ord('/'))
    bigdeer3.addch(3, 19, ord('^'))
    bigdeer3.addch(3, 20, ord('0'))
    bigdeer3.addch(3, 21, ord('\\'))
    bigdeer3.addch(4, 17, ord('/'))
    bigdeer3.addch(4, 18, ord('/'))
    bigdeer3.addch(4, 19, ord('\\'))
    bigdeer3.addch(4, 22, ord('\\'))
    bigdeer3.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer3.addstr(6, 6, "( ()_____( /") # ))
    bigdeer3.addstr(7, 6, "/ / /")
    bigdeer3.addstr(8, 5, "|/ \\")
    bigdeer3.addstr(9, 5, "/> \\>")
    bigdeer4.addch(0, 17, ord('\\'))
    bigdeer4.addch(0, 18, ord('/'))
    bigdeer4.addch(0, 19, ord('\\'))
    bigdeer4.addch(0, 20, ord('/'))
    bigdeer4.addch(1, 18, ord('\\'))
    bigdeer4.addch(1, 20, ord('/'))
    bigdeer4.addch(2, 19, ord('|'))
    bigdeer4.addch(2, 20, ord('_'))
    bigdeer4.addch(3, 18, ord('/'))
    bigdeer4.addch(3, 19, ord('^'))
    bigdeer4.addch(3, 20, ord('0'))
    bigdeer4.addch(3, 21, ord('\\'))
    bigdeer4.addch(4, 17, ord('/'))
    bigdeer4.addch(4, 18, ord('/'))
    bigdeer4.addch(4, 19, ord('\\'))
    bigdeer4.addch(4, 22, ord('\\'))
    bigdeer4.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer4.addstr(6, 6, "( )______( /") # )
    bigdeer4.addstr(7, 5, "(/ \\") # )
    bigdeer4.addstr(8, 0, "v___= ----^")
    lookdeer1.addstr(0, 16, "\\/ \\/")
    lookdeer1.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer1.addstr(2, 19, "\\=/")
    lookdeer1.addstr(3, 17, "^\\o o/^")
    lookdeer1.addstr(4, 17, "//( )")
    lookdeer1.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer1.addstr(6, 7, "( \\_____( /") # ))
    lookdeer1.addstr(7, 8, "( ) /")
    lookdeer1.addstr(8, 9, "\\\\ /")
    lookdeer1.addstr(9, 11, "\\>/>")
    lookdeer2.addstr(0, 16, "\\/ \\/")
    lookdeer2.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer2.addstr(2, 19, "\\=/")
    lookdeer2.addstr(3, 17, "^\\o o/^")
    lookdeer2.addstr(4, 17, "//( )")
    lookdeer2.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer2.addstr(6, 7, "(( )____( /") # ))
    lookdeer2.addstr(7, 7, "( / |")
    lookdeer2.addstr(8, 8, "\\/ |")
    lookdeer2.addstr(9, 9, "|> |>")
    lookdeer3.addstr(0, 16, "\\/ \\/")
    lookdeer3.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer3.addstr(2, 19, "\\=/")
    lookdeer3.addstr(3, 17, "^\\o o/^")
    lookdeer3.addstr(4, 17, "//( )")
    lookdeer3.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer3.addstr(6, 6, "( ()_____( /") # ))
    lookdeer3.addstr(7, 6, "/ / /")
    lookdeer3.addstr(8, 5, "|/ \\")
    lookdeer3.addstr(9, 5, "/> \\>")
    lookdeer4.addstr(0, 16, "\\/ \\/")
    lookdeer4.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer4.addstr(2, 19, "\\=/")
    lookdeer4.addstr(3, 17, "^\\o o/^")
    lookdeer4.addstr(4, 17, "//( )")
    lookdeer4.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer4.addstr(6, 6, "( )______( /") # )
    lookdeer4.addstr(7, 5, "(/ \\") # )
    lookdeer4.addstr(8, 0, "v___= ----^")
    ###############################################
    curses.cbreak()
    stdscr.nodelay(1)
    # Main show loop; repeats until look_out() detects a keypress and exits.
    while 1:
        stdscr.clear()
        treescrn.erase()
        w_del_msg.touchwin()
        treescrn.touchwin()
        treescrn2.erase()
        treescrn2.touchwin()
        treescrn8.erase()
        treescrn8.touchwin()
        stdscr.refresh()
        look_out(150)
        boxit()
        stdscr.refresh()
        look_out(150)
        seas()
        stdscr.refresh()
        greet()
        stdscr.refresh()
        look_out(150)
        fromwho()
        stdscr.refresh()
        look_out(150)
        tree()
        look_out(150)
        balls()
        look_out(150)
        star()
        look_out(150)
        strng1()
        strng2()
        strng3()
        strng4()
        strng5()
        # set up the windows for our blinking trees
        #
        # treescrn3
        treescrn.overlay(treescrn3)
        # balls
        treescrn3.addch(4, 18, ord(' '))
        treescrn3.addch(7, 6, ord(' '))
        treescrn3.addch(8, 19, ord(' '))
        treescrn3.addch(11, 22, ord(' '))
        # star
        treescrn3.addch(0, 12, ord('*'))
        # strng1
        treescrn3.addch(3, 11, ord(' '))
        # strng2
        treescrn3.addch(5, 13, ord(' '))
        treescrn3.addch(6, 10, ord(' '))
        # strng3
        treescrn3.addch(7, 16, ord(' '))
        treescrn3.addch(7, 14, ord(' '))
        # strng4
        treescrn3.addch(10, 13, ord(' '))
        treescrn3.addch(10, 10, ord(' '))
        treescrn3.addch(11, 8, ord(' '))
        # strng5
        treescrn3.addch(11, 18, ord(' '))
        treescrn3.addch(12, 13, ord(' '))
        # treescrn4
        treescrn.overlay(treescrn4)
        # balls
        treescrn4.addch(3, 9, ord(' '))
        treescrn4.addch(4, 16, ord(' '))
        treescrn4.addch(7, 6, ord(' '))
        treescrn4.addch(8, 19, ord(' '))
        treescrn4.addch(11, 2, ord(' '))
        treescrn4.addch(12, 23, ord(' '))
        # star
        treescrn4.standout()
        treescrn4.addch(0, 12, ord('*'))
        treescrn4.standend()
        # strng1
        treescrn4.addch(3, 13, ord(' '))
        # strng2
        # strng3
        treescrn4.addch(7, 15, ord(' '))
        treescrn4.addch(8, 11, ord(' '))
        # strng4
        treescrn4.addch(9, 16, ord(' '))
        treescrn4.addch(10, 12, ord(' '))
        treescrn4.addch(11, 8, ord(' '))
        # strng5
        treescrn4.addch(11, 18, ord(' '))
        treescrn4.addch(12, 14, ord(' '))
        # treescrn5
        treescrn.overlay(treescrn5)
        # balls
        treescrn5.addch(3, 15, ord(' '))
        treescrn5.addch(10, 20, ord(' '))
        treescrn5.addch(12, 1, ord(' '))
        # star
        treescrn5.addch(0, 12, ord(' '))
        # strng1
        treescrn5.addch(3, 11, ord(' '))
        # strng2
        treescrn5.addch(5, 12, ord(' '))
        # strng3
        treescrn5.addch(7, 14, ord(' '))
        treescrn5.addch(8, 10, ord(' '))
        # strng4
        treescrn5.addch(9, 15, ord(' '))
        treescrn5.addch(10, 11, ord(' '))
        treescrn5.addch(11, 7, ord(' '))
        # strng5
        treescrn5.addch(11, 17, ord(' '))
        treescrn5.addch(12, 13, ord(' '))
        # treescrn6
        treescrn.overlay(treescrn6)
        # balls
        treescrn6.addch(6, 7, ord(' '))
        treescrn6.addch(7, 18, ord(' '))
        treescrn6.addch(10, 4, ord(' '))
        treescrn6.addch(11, 23, ord(' '))
        # star
        treescrn6.standout()
        treescrn6.addch(0, 12, ord('*'))
        treescrn6.standend()
        # strng1
        # strng2
        treescrn6.addch(5, 11, ord(' '))
        # strng3
        treescrn6.addch(7, 13, ord(' '))
        treescrn6.addch(8, 9, ord(' '))
        # strng4
        treescrn6.addch(9, 14, ord(' '))
        treescrn6.addch(10, 10, ord(' '))
        treescrn6.addch(11, 6, ord(' '))
        # strng5
        treescrn6.addch(11, 16, ord(' '))
        treescrn6.addch(12, 12, ord(' '))
        # treescrn7
        treescrn.overlay(treescrn7)
        # balls
        treescrn7.addch(3, 15, ord(' '))
        treescrn7.addch(6, 7, ord(' '))
        treescrn7.addch(7, 18, ord(' '))
        treescrn7.addch(10, 4, ord(' '))
        treescrn7.addch(11, 22, ord(' '))
        # star
        treescrn7.addch(0, 12, ord('*'))
        # strng1
        treescrn7.addch(3, 12, ord(' '))
        # strng2
        treescrn7.addch(5, 13, ord(' '))
        treescrn7.addch(6, 9, ord(' '))
        # strng3
        treescrn7.addch(7, 15, ord(' '))
        treescrn7.addch(8, 11, ord(' '))
        # strng4
        treescrn7.addch(9, 16, ord(' '))
        treescrn7.addch(10, 12, ord(' '))
        treescrn7.addch(11, 8, ord(' '))
        # strng5
        treescrn7.addch(11, 18, ord(' '))
        treescrn7.addch(12, 14, ord(' '))
        look_out(150)
        reindeer()
        w_holiday.touchwin()
        w_holiday.refresh()
        w_del_msg.refresh()
        look_out(500)
        for i in range(0, 20):
            blinkit()
# Run under curses.wrapper so the terminal is restored even if main() raises.
curses.wrapper(main)
| {
"repo_name": "OS2World/APP-INTERNET-torpak_2",
"path": "Demo/curses/xmas.py",
"copies": "1",
"size": "24891",
"license": "mit",
"hash": -6949883812525731000,
"line_mean": 26.4735099338,
"line_max": 78,
"alpha_frac": 0.5361375598,
"autogenerated": false,
"ratio": 2.50993243924574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8475382248647314,
"avg_score": 0.014137550079685299,
"num_lines": 906
} |
""" ascl.php-out_1.0.py - program for downloading and processing the ascl.php webpage of ASCL journal
entries for a user-defined date (yy/mm), and outputting a structured txt file of journal entry fields
grouped and sorted for ADS submission, as well as an xls file of the fields for the user's use. Program
assumes that the php file accessed will adhere to the coding conventions used and identified in this
application.
"""
import os
import urllib
import xlwt
import time
# User interface
print "_________________________________________________________________________\n"
print "|* ____________________________________________________________________ *|\n"
print "|*| ASCL.PHP To ADS Report + Workbook |*|\n"
print "|*| Version 1.0 |*|\n"
print "|*|___________________________________________________________________ |*|\n"
print "|************************************************************************|\n"
year = raw_input(" Enter year to query:")
while(year.isdigit() == False or int(year) < 0 or int(year) > 99 or len(year) != 2):
print "\n Please enter the year as an integer"
print " in the format YY ...\n"
year = raw_input(" Enter year to query:")
month = raw_input("\n Enter month to query:")
while(month.isdigit() == False or int(month) < 1 or int(month) > 12 or len(month) != 2):
print "\n Please enter the month as an integer"
print " in the format MM \n"
month = raw_input("\n Enter month to query:")
# Starts timer
start = time.clock()
print "\n\n Downloading ascl.php...\n"
# Open url, read in php source to .txt and close file.
datasource = urllib.urlopen("http://asterisk.apod.com/ascl.php")
f = open('workfile.txt', 'w')
while 1:
line = datasource.readline()
f.write(line)
if line == "</table>": break
f.close()
print " Workfile.txt created...\n"
#Open source file for reading
f = open('workfile.txt', 'r')
# Initialize list to hold entry string in order
array = []
# Reads in first line
line = f.readline()
# While loop runs until line read in is end of php table tag.
while(line != "</table>"):
# Read in line
line = f.readline()
# If line is element of fields
if(line.find("<tr><td>") != -1):
# Check if readline read in end of entry '</a>'
while(line.find("</td></tr>") == -1):
tmp = f.readline()
line += tmp
# Parse string to form 4 list elements
par = line.split("ascl:")
par = par[1].split("</td><td>")
# If parsed year and month match, append entry to list of unsorted entries.
if(par[0][0:2] == year and par[0][2:4] == month):
array.append([int(par[0][5:]), par])
# If no entries found matching date, exit.
if(len(array) == 0):
print " No entries matching query."
wait = input(" Press Enter to exit.")
exit()
# If there are entries in array, create and/or change directory
if(os.path.exists("Output") != True):
os.mkdir("Output")
os.chdir("Output")
if(os.path.exists("ascl." + year + month) != True):
os.mkdir("ascl." + year + month)
os.chdir("ascl." + year + month)
# If there are entries in array, open .txt and .xls reports for writing.
o = open('ACLS_ADS_data_build_file.' + year + month + '.txt' , 'w')
print " ACLS_ADS_data_build_file." + year + month + ".txt Created"
ss = xlwt.Workbook(encoding="utf-8")
page = ss.add_sheet("sheet",cell_overwrite_ok=True)
print " ACLS_ADS_data_build_file." + year + month + ".xls Created"
# Sort array of unsorted entries.
array.sort()
# Process array of entries and send to files.
for j in range(0,len(array)):
line = array[j][1]
# Search and replace html tags
for i in range(0,4):
line[i] = str(line[i])
# Search and replace various html tags
line[i] = line[i].replace("<ul><li>", " ")
line[i] = line[i].replace("</li><li>", "\n ")
line[i] = line[i].replace("</li></ul>", "\n ")
line[i] = line[i].replace(">", ">")
line[i] = line[i].replace("<", "<")
line[i] = line[i].replace(""", '"')
line[i] = line[i].replace("&", "&")
line[i] = line[i].replace("<ul>", " ")
line[i] = line[i].replace("</ul>", "")
line[i] = line[i].replace('</span>', "")
line[i] = line[i].replace('<sup>', "^")
line[i] = line[i].replace('</sup>', "")
line[i] = line[i].replace('<sub>', "")
line[i] = line[i].replace('</sub>', "")
line[i] = line[i].replace('</div>', "")
# Search and replace unicode entities with ascii chars
while(line[i].find("&#") != -1):
a = line[i].find("&#")
b = line[i][a:].find(';')
num = int(line[i][a+2:a+b])
ch = unichr(num)
line[i] = line[i][0:a] + ch + line[i][a+3+b:]
# Search for hyperlinks and replace until there are none.
while(line[i].find("<a href") != -1):
# Parses tags around hyperlink text and saves text field.
tmp = line[i].split("<a href=")
tmp = tmp[1].split(">")
tmp = tmp[1].split("</a")
tmp = tmp[0]
# Locates indices of beginning and end of url and cuts it from line[i]
a = line[i].find("f=") + 1
b = line[i].find("class") - 1
line[i] = line[i][0:a] + line[i][b:]
# Locates name field within hyperlink and replaces link tags with only text
loc = line[i].find(">" + tmp + "<") + 1
a = line[i].find("<a href")
b = line[i].find(tmp + "</a>")
line[i] = line[i][0:a] + tmp + line[i][loc+len(tmp)+4:]
# Searches for parenthesized hyperlinks and replaces with url text.
while(line[i].find("<!-- m -->") != -1):
# Parse tags around url to extract it
tmp = line[i].split('"postlink" href="')
tmp = tmp[1].split('">http:')
tmp = tmp[0]
# Replace tag with just url in field.
a = line[i].find("<!-- m -->")
b = line[i].find("</a><!-- m -->")
line[i] = line[i][0:a] + tmp + line[i][b+14:]
# Searches for span tags
while(line[i].find("<span style=") != -1):
a = line[i].find("<span style=")
b = line[i][a:].find(">")
line[i] = line[i][0:a] + line[i][a+b+1:]
# Searches for ol tags
while(line[i].find("<ol style=") != -1):
a = line[i].find("<ol style=")
b = line[i].find("<li>") + 4
c = line[i].find("</li></ol>")
line[i] = line[i][0:a] + line[i][b:]
loc = b - (b-a)
count = 1
while(line[i][loc:].find("</li></ol>") != -1):
period = line[i][loc:].find('.') + loc
if(count==1):
line[i] = line[i][0:loc] + str(count) + ". " + line[i][loc:]
else: line[i] = line[i][0:loc] + str(count) + ". " + line[i][loc:]
loc = period + 4 + len(str(count))
count += 1
line[i] = line[i][0:loc-5] + line[i][loc+10:]
# Searches for fiv tags
while(line[i].find("<div style=") != -1):
a = line[i].find("<div style=")
b = line[i][a:].find(">") + a
line[i] = line[i][0:a] + line[i][b+1:]
# Write output to ADS file according to upload file template.
o.write("%T " + line[1] + "\n")
o.write("%A " + line[3] + "\n")
o.write("%J Astrophysics Source Code Library, record ascl:" + year + month + line[0][4:] + "\n")
o.write("%D " + month + "/20" + year + "\n")
o.write("%B " + line[2] + "\n")
o.write("%I ELECTR: http://ascl.net/" + year + month + line[0][9:21] + line[0][4:])
o.write("\n\n")
# Spreadsheet commands
page.write(j, 0, line[1])
page.write(j, 1, line[3])
page.write(j, 2, "Astrophysics Source Code Library, record " + year + month + line[0][4:])
page.write(j, 3, month + "/20" + year)
page.write(j, 4, "http://ascl.net/" + year + month + line[0][9:21]+ line[0][4:])
# Save Spreadsheet
ss.save("ACLS_ADS_data_build_file." + year + month + ".xls")
print " ACLS_ADS_data_build_file." + year + month + ".xls Finished"
# Close input and output.
o.close()
f.close()
print " ACLS_ADS_data_build_file." + year + month + ".txt Finished"
# Ends timer and prints final output.
elapsed = (time.clock() - start)
elapsed = "%.2f" % elapsed
elapsed = str(elapsed)
print "\n Done! Finished in " + elapsed + " seconds."
wait = input(" Press Enter to exit.")
| {
"repo_name": "jconenna/ASCL-Out",
"path": "source.py",
"copies": "1",
"size": "9215",
"license": "mit",
"hash": -2354478182651200000,
"line_mean": 38.8917748918,
"line_max": 103,
"alpha_frac": 0.4813890396,
"autogenerated": false,
"ratio": 3.459084084084084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9370167253623751,
"avg_score": 0.014061174012066548,
"num_lines": 231
} |
import os
import re

import SCons.Action
import SCons.Builder
import SCons.Scanner
import SCons.Util
## TODO - improve these regular expressions
# Patterns matching calls in an R script that create output files
# (png, save, sink, ggsave); group 1 captures the output file name.
output_re = [
    re.compile(r'''png\('([^']+)'\)''', re.M)
    , re.compile(r'''^[^#]*save\(.*file\s*=\s*['"]([^'"]+)['"]\s*[),].*$''', re.M)
    , re.compile(r'''sink\(.*file\s*=\s*'([^']+)'\s*[),]''', re.M)
    , re.compile(r'''ggsave\(.*filename\s*=\s*['"]([^'"]+)['"]\s*[),]''', re.M)
]
## TODO - improve these regular expressions
# Patterns matching calls in an R script that read input files
# (source, load, read.table); group 1 captures the input file name.
source_re = [
    re.compile(r'''source\('([^']+)'\)''', re.M)
    , re.compile(r'''load\(['"]([^'"]+)['"]\)''', re.M)
    , re.compile(r'''read\.table\(['"]([^'"]+)''', re.M)
]
def findall(contents, regular_expressions):
    '''Run ``findall(contents)`` using every re in 'regular_expressions'
    and return a list of all matches.

    Args:
        contents (str): the text to search.
        regular_expressions (list): compiled regular expression objects.

    Returns:
        list: all matches from all patterns, in pattern order.
    '''
    matches = []
    # 'pattern', not 're' as before: the old loop variable shadowed the
    # imported re module.
    for pattern in regular_expressions:
        matches.extend(pattern.findall(contents))
    return matches
def emit_r(target, source, env):
    """Emitter: derive the targets of an R build from the script source.

    Each source script is scanned for output-producing calls (png, save,
    sink, ggsave) and every referenced file, relative to the script's own
    directory, becomes a build target.
    """
    emitted = []
    for script in source:
        script_dir = os.path.dirname(str(script))
        text = script.get_contents()
        # Register each distinct output file mentioned in the script.
        for filename in set(findall(text, output_re)):
            emitted.append(os.path.join(script_dir, filename))
    return emitted, source
def search_deps_r(node, env):
    """Recursively collect the input files referenced by an R script.

    Every unique source()/load()/read.table() reference becomes a
    dependency, which is scanned in turn for its own dependencies.
    """
    found = []
    script_dir = os.path.dirname(str(node))
    for filename in set(findall(node.get_contents(), source_re)):
        dep_node = env.File(os.path.join(script_dir, filename))
        found.append(dep_node)
        found.extend(search_deps_r(dep_node, env))
    return found
def scan_r(node, env, path):
    """Scanner entry point: return the dependencies of an R script node."""
    return search_deps_r(node, env)
# Scanner that finds the implicit dependencies (sourced scripts and loaded
# data files) of an R script; recursive so indirect dependencies are found.
scanner_r = SCons.Scanner.Base(
    name = 'R Scanner',
    function = scan_r,
    skeys = ['.r', '.R'],
    path_function = SCons.Scanner.FindPathDirs('RPATH'),
    recursive = True)

# Builder that runs an R script via $RCOM, chdir'd into the script's
# directory so relative paths inside the script resolve.
builder_r = SCons.Builder.Builder(
    action = SCons.Action.Action('$RCOM', chdir=1),
    src_suffix = ['.r','.R'],
    emitter = emit_r,
    source_scanner = scanner_r)
def generate(env):
    """Attach the R builder and its default construction variables to *env*."""
    env['BUILDERS']['R'] = builder_r
    # Default settings; any of these may be overridden by the project.
    defaults = (
        ('R', ['R']),
        ('RPATH', ['.']),
        ('RFLAGS', SCons.Util.CLVar('--vanilla')),
        ('RCOM', '$R $RFLAGS < ${SOURCE.file}'),
    )
    for key, value in defaults:
        env[key] = value
def exists(env):
    """Tool availability check: true when an R executable can be detected."""
    return env.Detect('R')
| {
"repo_name": "kboyd/scons_r",
"path": "__init__.py",
"copies": "1",
"size": "2561",
"license": "bsd-2-clause",
"hash": 4959422635384093000,
"line_mean": 26.5376344086,
"line_max": 86,
"alpha_frac": 0.5626708317,
"autogenerated": false,
"ratio": 3.193266832917706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4255937664617706,
"avg_score": null,
"num_lines": null
} |
"""A scraper for downloading checklists from eBird.
This scraper creates checklists for recent observations for a given region
using the eBird API. Additional information for each checklist is also
scraped from the checklist web page.
"""
import json
import os
import re
from scrapy import log
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from scrapy.spider import BaseSpider
from checklists_scrapers.spiders import DOWNLOAD_FORMAT, DOWNLOAD_LANGUAGE
from checklists_scrapers.spiders.utils import remove_whitespace, select_keys, dedup, \
save_json_data
class JSONParser(object):
    """Extract checklists from JSON data returned from the eBird API."""

    # Record keys that describe the observation location.
    location_keys = [
        'locID',
        'locName',
        'subnational1Name',
        'subnational2Name',
        'countryName',
        'lat',
        'lng',
    ]

    # Record keys that identify a checklist (one submission); a checklist
    # also carries the location fields.
    checklist_keys = [
        'firstName',
        'lastName',
        'obsDt',
        'subID',
    ] + location_keys

    def __init__(self, response):
        """Initialize the parser with a JSON encoded response.

        Args:
            response (str): an encoded string containing the JSON data returned
                by a call to the eBird API.

        Returns:
            JSONParser: a JSONParser object with the records decoded from
                the JSON data.
        """
        self.records = json.loads(response.body_as_unicode())

    def get_checklists(self):
        """Get the set of checklists from the observations."""
        # One checklist per unique submission (subID); each checklist then
        # collects the entries of every observation belonging to it.
        filtered = dedup(select_keys(self.records, self.checklist_keys))
        checklists = [self.get_checklist(record) for record in filtered]
        for checklist in checklists:
            checklist['entries'] = [self.get_entry(r) for r in self.records
                                    if r['subID'] == checklist['identifier']]
        return checklists

    def get_checklist(self, record):
        """Get the fields for a checklist from an observation.

        Args:
            record (dict): the observation record.

        Returns:
            dict: a dictionary containing the checklist fields.
        """
        checklist = {
            'meta': {
                'version': DOWNLOAD_FORMAT,
                'language': DOWNLOAD_LANGUAGE,
            },
            'identifier': record['subID'].strip(),
            'date': record['obsDt'].strip().split(' ')[0],
            'location': self.get_location(record),
            'observers': self.get_observers(record),
            'source': self.get_source(record),
        }
        # 'obsDt' contains "date time" only when a time was recorded; only
        # then can a protocol (which includes the time) be reported.
        if ' ' in record['obsDt']:
            checklist['protocol'] = self.get_protocol(record)
        return checklist

    def get_protocol(self, record):
        """Get the information about the checklist protocol.

        Args:
            record (dict): the observation record.

        Returns:
            dict: a dictionary containing the protocol fields.

        A default protocol name of 'Incidental' is used since only the time
        of the observation is currently available.
        """
        return {
            'name': 'Incidental',
            'time': record['obsDt'].strip().split(' ')[1],
        }

    def get_observers(self, record):
        """Get the information about the checklist observers.

        Args:
            record (dict): the observation record.

        Returns:
            dict: a dictionary containing the list of observers names.
        """
        first_name = record['firstName'].strip()
        last_name = record['lastName'].strip()
        return {
            'count': 1,
            'names': [first_name + ' ' + last_name],
        }

    def get_source(self, record):
        """Get the information about the source of the checklist.

        Args:
            record (dict): the observation record.

        Returns:
            dict: a dictionary containing the source fields.
        """
        first_name = record['firstName'].strip()
        last_name = record['lastName'].strip()
        return {
            'name': 'eBird',
            'submitted_by': first_name + ' ' + last_name,
        }

    def get_locations(self):
        """Get the set of locations from the observations.

        Returns:
            list(dict): a list of dicts containing the fields for a location.
        """
        filtered = dedup(select_keys(self.records, self.location_keys))
        return [self.get_location(record) for record in filtered]

    def get_location(self, record):
        """Get the fields for a location from an observation.

        Returns:
            dict: a dictionary containing the fields for a location.

        If a given field is not present in the record then the value defaults
        to an empty string. This allows the method to process records that
        contain either the simple results fields or the full results fields.
        """
        return {
            'identifier': record['locID'],
            'name': record['locName'],
            'county': record.get('subnational2Name', ''),
            'region': record.get('subnational1Name', ''),
            'country': record.get('countryName', ''),
            'lat': record['lat'],
            'lon': record['lng'],
        }

    def get_entry(self, record):
        """Get the fields for an entry from an observation.

        Returns:
            dict: a dictionary containing the fields for a checklist entry.
        """
        return {
            'identifier': record['obsID'],
            'species': self.get_species(record),
            # 'howMany' is absent for presence-only records; default to 0.
            'count': record.get('howMany', 0),
        }

    def get_species(self, record):
        """Get the species fields for an entry from an observation.

        Args:
            record (dict); the observation record,

        Returns:
            dict: a dictionary containing the fields for a species.
        """
        return {
            'name': record['comName'],
            'scientific_name': record['sciName'],
        }
class HTMLParser(object):
    """Extract information from the checklist web page.

    Only the information not available through the API is extracted, with the
    exception of the counts for each species- which has the associated details
    dictionary which contains a breakdown of the count based on age and sex.
    """

    # eBird mixes up activities and protocols a bit so this table is used
    # to map protocol names onto an activity and alternative protocol.
    activities = {
        'Nocturnal Flight Call Count': (
            'Nocturnal Flight Call Count', 'Stationary'),
        'Heron Area Count': ('Heron Count', 'Area'),
        'Heron Stationary Count': ('Heron Count', 'Stationary'),
    }

    # Activity reported when the protocol name does not imply one.
    default_activity = 'Birding'

    def __init__(self, response):
        """Initialize the parser with an HTML encoded response.

        Args:
            response (str): the contents of the checklist web page.

        Returns:
            HTMLParser: an HTMLParser object containing the contents of the
                checklist web page and a dict containing the main checklist
                attributes.
        """
        self.docroot = HtmlXPathSelector(response)
        self.attributes = self.get_attributes(self.docroot)

    def get_attributes(self, node):
        """Get the checklist attributes.

        Args:
            node (HtmlXPathSelector): an XML node,

        Returns:
            dict: a dictionary containing the fields and values of a checklist.
        """
        attr = {}
        # Attributes are laid out as <dl><dt>name</dt><dd>value</dd>; dt and
        # dd elements are matched up by index.
        for idx, item in enumerate(node.select('//dl/dt/text()')):
            key = item.extract().strip()
            if key == 'Observers:':
                # Observer names appear both as bare text and inside
                # <strong> elements; collect both and comma-join them.
                names = []
                values = node.select('//dl/dd')[idx].select('text()').extract()
                for value in values:
                    name = value.replace(',', '').strip()
                    if name:
                        names.append(name)
                values = node.select('//dl/dd')[idx]\
                    .select('strong/text()').extract()
                for value in values:
                    name = value.replace(',', '').strip()
                    if name:
                        names.append(name)
                attr[key] = ','.join(names)
            else:
                value = node.select('//dl/dd')[idx].select('text()').extract()
                attr[key] = value[0].strip()
        return attr

    def get_checklist(self):
        """Get the checklist fields extracted from the HTML response.

        Returns:
            dict: a checklist containing the fields extract from the HTML.

        Only the fields not available through the API are extracted from the
        HTML. The parser can be sub-classed to extract any more information.
        """
        return {
            'observers': self.get_observers(),
            'activity': self.get_activity(),
            'protocol': self.get_protocol(),
            'entries': self.get_entries(),
            'comment': self.attributes.get('Comments:', '')
        }

    def get_protocol(self):
        """Get the protocol used for the checklist.

        Returns:
            dict: a dictionary containing the fields describing the protocol
                used to count the birds recorded in the checklist.
        """
        # Map activity-style protocol names onto the real protocol.
        protocol_name = self.attributes.get('Protocol:', None)
        if protocol_name in self.activities:
            protocol_name = self.activities[protocol_name][1]
        # Duration is reported as e.g. "2 hour(s) 30 minute(s)".
        duration_str = self.attributes.get('Duration:', '')
        if 'hour' in duration_str:
            duration_hours = int(re.search(
                r'(\d+) h', duration_str).group(1))
        else:
            duration_hours = 0
        if 'min' in duration_str:
            duration_minutes = int(re.search(
                r'(\d+) m', duration_str).group(1))
        else:
            duration_minutes = 0
        # Distance is normalized to metres; non-kilometre values are
        # treated as miles (1609 m per mile).
        distance_str = self.attributes.get('Distance:', '0 kilometer(s)')
        if 'kilometer' in distance_str:
            distance = int(float(re.search(
                r'([\.\d]+) k', distance_str).group(1)) * 1000)
        else:
            distance = int(float(re.search(
                r'([\.\d]+) m', distance_str).group(1)) * 1609)
        return {
            'name': protocol_name,
            'duration_hours': duration_hours,
            'duration_minutes': duration_minutes,
            'distance': distance,
            'area': 0,
        }

    def get_activity(self):
        """Get the activity used for the checklist.

        Returns:
            str: a name for the activity.

        Uses the activities table to separate out specific activities from
        the names eBird uses for protocols.
        """
        protocol_name = self.attributes.get('Protocol:', None)
        if protocol_name in self.activities:
            activity = self.activities[protocol_name][0]
        else:
            activity = self.default_activity
        return activity

    def get_observers(self):
        """Get the additional observers.

        Returns:
            list(str): the observers, excluding the person who submitted the
                checklist.
        """
        try:
            count = int(self.attributes.get('Party Size:', '0'))
        except ValueError:
            count = 0
        names = remove_whitespace(
            self.attributes.get('Observers:', '').split(','))
        return {
            'count': count,
            'names': names,
        }

    def get_entries(self):
        """Get the checklist entries with any additional details for the count.

        Returns:
            list(dict): a list of dicts contains the fields for a checklist
                entry. In turn each contains a list of dicts containing the
                fields describing the breakdown of the entry count by age and
                sex.
        """
        entries = []
        for selector in self.docroot.select('//tr[@class="spp-entry"]'):
            name = selector.select(
                './/h5[@class="se-name"]/text()').extract()[0].strip()
            count = selector.select(
                './/h5[@class="se-count"]/text()').extract()[0].strip()
            species = {
                'name': name,
            }
            # Presence-only entries have a non-numeric count, e.g. "X".
            try:
                count = int(count)
            except ValueError:
                count = 0
            entries.append({
                'species': species,
                'count': count,
                'details': self.get_entry_details(selector),
                'comment': self.get_entry_comment(selector),
            })
        return entries

    def get_entry_comment(self, node):
        """Get any comment for a checklist entry.

        Args:
            node (HtmlXPathSelector): the node in the tree from where to
                extract the comment.

        Returns:
            str: any comment associated with a checklist entry.
        """
        comment = ''
        selection = node.select('.//p[@class="obs-comments"]/text()')\
            .extract()
        if selection:
            comment = selection[0].strip()
        return comment

    def get_entry_details(self, node):
        """Get the details for each count.

        Args:
            node (HtmlXPathSelector): the node in the tree from where to
                extract the entry details.

        Returns:
            list(dict): a list of dicts containing the fields that describe
                the breakdown of the checklist entry count by age and sex.
        """
        details = []
        # The breakdown table has ages as column headers (th) and one row
        # per sex; each non-empty cell becomes one detail record.
        xpath = './/div[@class="sd-data-age-sex"]//tr'
        names = node.select(xpath).select('./th/text()').extract()
        cols = len(names)
        row = 0
        for selector in node.select(xpath):
            ages = selector.select('./td')
            if not ages:
                continue
            sex = ages[0].select('./text()').extract()[0]
            for col, age in zip(range(1, cols + 1), names):
                values = ages[col].select('./text()').extract()
                if values:
                    details.append({
                        'identifier': 'DET%02d' % (row * cols + col),
                        'age': age,
                        'sex': sex,
                        'count': int(values[0])
                    })
            row += 1
        return details
class EBirdSpider(BaseSpider):
    """Extract checklists recently added to eBird.

    The spider starts by using the API to return the observations for the
    last <n> days for the selected region. The recent observations for a region
    only contain the simple results fields so additional requests are generated
    for the recent observations for each location which contain the full result
    fields. Not all the useful information for a checklist is available through
    the API so the checklist web page from eBird.org is also parsed to extract
    information such as the type of protocol used, breakdowns by age and sex of
    the counts for each species, etc. The completed checklist is then written
    in JSON format to a file.

    Details on the eBird API and the different sets of fields returned can be
    found at https://confluence.cornell.edu/display/CLOISAPI/eBird+API+1.1

    Three settings control the behaviour of the spider:

    DOWNLOAD_DIR: the directory where the downloaded checklists
    will be written in JSON format. The directory will be created if it does
    not exist.

    DURATION: the number of days to fetch observations for. The eBird
    API allows access to observations up to 30 days old.

    EBIRD_INCLUDE_HTML: include data from the checklist web page.

    The spider keeps a list of checklists downloaded and saved along with any
    errors raised. These are used to create a status report by the extension,
    SpiderStatusReport which is emailed out when the spider finishes.
    """

    name = 'ebird'
    allowed_domains = ["ebird.org", "secure.birds.cornell.edu"]

    # Parser classes; class attributes so subclasses can substitute them.
    api_parser = JSONParser
    html_parser = HTMLParser

    # API endpoint templates and the checklist web page URL.
    region_url = "http://ebird.org/ws1.1/data/obs/region/recent?" \
        "rtype=subnational1&r=%s&back=%d&fmt=json"
    location_url = "http://ebird.org/ws1.1/data/obs/loc/recent?" \
        "r=%s&detail=full&back=%d&includeProvisional=true&fmt=json"
    checklist_url = "http://ebird.org/ebird/view/checklist?subID=%s"

    def __init__(self, region, **kwargs):
        """Initialize the spider.

        Args:
            region (str): the code identifying the eBird region to fetch
                observations for.

        Returns:
            EBirdSpider: a Scrapy crawler object.

        Raises:
            ValueError: if no region is given.
        """
        super(EBirdSpider, self).__init__(**kwargs)
        if not region:
            raise ValueError("You must specify an eBird region")
        self.region = region
        self.log("Downloading checklists for region: %s" % self.region,
                 log.INFO)
        # Accumulators used by the status-report extension.
        self.checklists = []
        self.errors = []
        self.warnings = []

    def start_requests(self):
        """Configure the spider and issue the first request to the eBird API.

        Returns:
            Request: yields a single request for the recent observations for
                an eBird region.
        """
        self.duration = int(self.settings['DURATION'])
        self.log("Fetching observations for the past %d days" % self.duration,
                 log.INFO)
        self.directory = self.settings['DOWNLOAD_DIR']
        if self.directory and not os.path.exists(self.directory):
            os.makedirs(self.directory)
        self.log("Writing checklists to %s" % self.directory, log.INFO)
        self.include_html = self.settings['EBIRD_INCLUDE_HTML']
        if self.include_html:
            self.log("Downloading checklists from API and web pages", log.INFO)
        else:
            self.log("Downloading checklists from API only", log.INFO)
        url = self.region_url % (self.region, self.duration)
        return [Request(url, callback=self.parse_region)]

    def parse_region(self, response):
        """Request the recent observations for each location.

        Args:
            response (Response): the result of calling the eBird API to get the
                recent observations for a region.

        Returns:
            Request: yields a series of requests to the eBird API to get the
                recent observations for each location extracted from the
                recent observations for the region.
        """
        for location in self.api_parser(response).get_locations():
            url = self.location_url % (location['identifier'], self.duration)
            yield Request(url, callback=self.parse_locations)

    def parse_locations(self, response):
        """Create the checklists from the observations.

        Args:
            response (Response): the result of calling the eBird API to get the
                recent observations for a location.

        Returns:
            Request: (when the attribute include_html is True) yields a series
                of requests to the eBird website to get web page used to
                display the details of a checklist.

        Even with the full results fields there is still useful information
        missing so additional requests are generated for the checklist web
        page. Whether the spider continues and processes the checklist web
        page is controlled by the EBIRD_INCLUDE_HTML setting.
        """
        checklists = self.api_parser(response).get_checklists()
        for checklist in checklists:
            checklist['source']['api'] = response.url
            if self.include_html:
                url = self.checklist_url % checklist['identifier']
                # dont_filter: several checklists may map to the same URL.
                yield Request(url, callback=self.parse_checklist,
                              dont_filter=True, meta={'checklist': checklist})
            else:
                self.save_checklist(checklist)

    def parse_checklist(self, response):
        """Parse the missing checklist data from the web page.

        Args:
            response (str): the contents of the checklist web page.

        The checklist first extracted from the call the eBird API is passed
        through the parse_region() and parse_locations() methods using the
        metadata attribute on the Request and Response objects. It is then
        merged with the data that has been extracted from the web page and
        written to a file in the directory specified when the spider was
        created.

        ISSUE: If the setting CONCURRENT_REQUEST != 1 then the checklist data
        in the response sometimes does not match the checklist in the request
        metadata. The problem appears to be intermittent, but for a given run
        of the spider it usually happens after the 4th or 5th response. The
        cause is not known. If the problem occurs then an error is logged and
        the checklist is discarded.
        """
        if not response.url.endswith(response.meta['checklist']['identifier']):
            self.log("Checklists in response and request don't match."
                     "Identifiers: %s != %s" % (
                         response.url[-9:],
                         response.meta['checklist']['identifier']
                     ), log.ERROR)
            return
        update = self.html_parser(response).get_checklist()
        original = response.meta['checklist']
        checklist = self.merge_checklists(original, update)
        checklist['source']['url'] = response.url
        self.save_checklist(checklist)

    def merge_checklists(self, original, update):
        """Merge two checklists together.

        Args:
            original (dict): the checklist extracted from the JSON data.
            update (dict): the checklist extracted from the web page.

        Returns:
            dict: an updated checklist containing values from the first
                (original) updated with values from the second (update).
        """
        entries, warnings = self.merge_entries(
            original['entries'], update['entries'])
        checklist = {
            'meta': {
                'version': original['meta']['version'],
                'language': original['meta']['language'],
            },
            'identifier': original['identifier'],
            'date': original['date'],
            'source': original['source'],
            'observers': self.merge_observers(original['observers'],
                                              update['observers']),
            'activity': update['activity'],
            'location': original['location'],
            'comment': update['comment'],
            'entries': entries,
        }
        # The web page protocol fields extend/override the API's; the API
        # protocol only exists when the observation carried a time.
        if 'protocol' in original:
            protocol = original['protocol'].copy()
            protocol.update(update['protocol'])
        else:
            protocol = update['protocol'].copy()
        checklist['protocol'] = protocol
        if warnings:
            self.warnings.append((checklist, warnings))
        return checklist

    def merge_observers(self, originals, updates):
        """Merge the two lists of observers together.

        Args:
            originals (list): the observer extracted from the API JSON data.
            updates (list): the observers extracted from the web page.

        Returns:
            dict: a dictionary containing all the names reported as observers
                on the two checklists along with a total count of the number of
                observers present.
        """
        names = set(originals['names'])
        names.update(set(updates['names']))
        # Names present in both lists would otherwise be counted twice.
        total = originals['count'] + updates['count']
        for name in originals['names']:
            if name in updates['names']:
                total -= 1
        return {
            'names': list(names),
            'count': total,
        }

    def merge_entries(self, originals, updates):
        """Merge two lists of entries together.

        Args:
            originals (list): the entries extracted from the API JSON data.
            updates (list): the entries extracted from the web page.

        Returns:
            tuple(list, list): a tuple containing the complete (deep) copy of
                the entries merged together and a list of any warnings generated
                when merging the lists together.

        IMPORTANT: The records from the API contain only the species name.
        The subspecies name is discarded. That means if there are two records
        for a species with the same count. It won't be possible to determine
        which record to update when the lists are merged. In this case the
        records will not be merged and only the records from the API will be
        included in the merged list.
        """
        merged = []
        warnings = []
        # Deep-copy the API entries; they are the baseline.
        for entry in originals:
            merged.append({
                'identifier': entry['identifier'],
                'species': entry['species'].copy(),
                'count': entry['count'],
            })
        # Index the baseline by (species name, count) so web-page entries
        # can be matched up; multiple hits mean the match is ambiguous.
        index = {}
        for entry in merged:
            key = entry['species']['name'].split('(')[0].strip()
            count = entry['count']
            if key in index:
                if count in index[key]:
                    index[key][count].append(entry)
                else:
                    index[key][count] = [entry]
            else:
                index[key] = {count: [entry]}
        for name, counts in index.items():
            for count, entries in counts.items():
                if len(entries) > 1:
                    message = "Could not update record from API. There are" \
                              " %s records that match: species=%s; count=%d." \
                              % (len(entries), name, count)
                    warnings.append(message)
                    self.log(message)
        for entry in updates:
            key = entry['species']['name'].split('(')[0].strip()
            count = entry['count']
            target = None
            added = False
            if key in index:
                if count in index[key]:
                    hits = len(index[key][count])
                else:
                    hits = 0
                if hits == 0:
                    # Species known but count differs: add as a new record.
                    target = {}
                    merged.append(target)
                    added = True
                elif hits == 1:
                    target = index[key][count][0]
                # hits > 1: ambiguous, leave the API records untouched.
            else:
                target = {}
                merged.append(target)
                added = True
            if target is not None:
                target['species'] = entry['species'].copy()
                target['count'] = entry['count']
                if 'comment' in entry:
                    target['comment'] = entry['comment']
                if 'details' in entry:
                    target['details'] = []
                    for detail in entry['details']:
                        target['details'].append(detail.copy())
            if added:
                message = "Web page contains record missing from API:" \
                          " species=%s; count=%d." \
                          % (entry['species']['name'], entry['count'])
                if self.settings['LOG_LEVEL'] == 'DEBUG':
                    warnings.append(message)
                self.log(message)
        return merged, warnings

    def save_checklist(self, checklist):
        """Save the checklist in JSON format.

        Args:
            checklist (dict); the checklist.

        The filename is built from the source, in this case 'ebird', and the
        checklist identifier so that the data is always written to the same
        file. The directory where the files are written is defined by the
        setting DOWNLOAD_DIR. If the directory attribute is set to None then
        the checklist is not saved (used for testing).

        The saved checklist is added to the list of checklists downloaded so
        far so it can be used to generate a status report once the spider has
        finished.
        """
        if self.directory:
            path = os.path.join(self.directory, "%s-%s.json" % (
                checklist['source']['name'], checklist['identifier']))
            save_json_data(path, checklist)
            self.checklists.append(checklist)
            self.log("Wrote %s: %s %s (%s)" % (
                path, checklist['date'], checklist['location']['name'],
                checklist['source']['submitted_by']), log.DEBUG)
| {
"repo_name": "StuartMacKay/checklists_scrapers",
"path": "checklists_scrapers/spiders/ebird_spider.py",
"copies": "1",
"size": "28525",
"license": "bsd-3-clause",
"hash": 3322886059164118000,
"line_mean": 34.5230386052,
"line_max": 86,
"alpha_frac": 0.5645924628,
"autogenerated": false,
"ratio": 4.643496662868305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000014314138074175864,
"num_lines": 803
} |
"""A scraper for Malta 2007-2013."""
from datapackage_pipelines.wrapper import spew, ingest
from logging import info, debug
from lxml.html import fromstring
from requests import Session
# Host serving the "Investing in Your Future" project listings.
BASE_URL = 'https://investinginyourfuture.gov.mt'
# AJAX endpoint returning one page of the paginated project list.
PAGINATION_URL = BASE_URL + '/ajax/loadProjects.ashx?page={counter}'
# Anchor element of each project inside a listing page.
PROJECT_URLS_XPATH = './/div[@class="project-listing-item-title"]/a'
# Field name -> XPath of the element holding that field on a project page.
FIELD_XPATHS = {
    'Code': './/span[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectRefCode"]',
    'Title': './/span[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectTitle"]',
    'Project Cost': ".//*[@id='mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectCostBeneficiaryItem_divCostValue']",
    'Beneficiary': './/span[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectCostBeneficiaryItem_divBeneficiaryValue"]',
    'Line Ministry': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdLineMinistry"]',
    'Start Date': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdStartDate"]',
    'End Date': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdEndDate"]',
    'Non Technical Short Summary Of Project': ".//*[@id='mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_divNonTechnicalShortSummaryContent']/p",
    'Operational Programme': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdOperationalProgramme"]',
    'Fund': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdFund"]',
    'Operational Objective': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdOperationalObjective"]/p',
    'Priority Axis': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdPriorityAxis"]',
    'Focus Area Of Intervention': './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdFocusAreaOfIntervention1"]',
    'Project Objectives': './/div[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_divProjectObjectives"]/p',
    'Project Results': './/div[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_divProjectResults"]/p',
    'Project Purpose': './/div[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_divProjectPurpose"]/p',
}
# Shared HTTP session so TCP connections are reused across requests.
session = Session()
def scrape_project(url):
    """Yield (field name, extracted text) pairs for one project page.

    Fetches *url* with the shared session, parses the HTML, and looks up
    each entry of FIELD_XPATHS; a field whose element is missing yields
    None as its value.
    """
    page = session.get(url)
    tree = fromstring(page.content)
    for field, xpath in FIELD_XPATHS.items():
        match = tree.find(xpath)
        text = match.text if match is not None else None
        debug('Extracted %s = %s', field, text)
        yield field, text
def scrape_projects(paths):
    """Yield one dictionary of scraped field values per project path."""
    for relative_path in paths:
        row = dict(scrape_project(BASE_URL + relative_path))
        info('Scraped %s', row)
        yield row
def get_project_urls():
    """Return the complete list of project URLS.

    Walks the AJAX pagination endpoint page by page, collecting the href
    of every project link, until an empty response body signals that the
    last page has been passed.
    """
    collected = []
    page_number = 0
    while True:
        page_number += 1
        response = session.get(PAGINATION_URL.format(counter=page_number))
        if not response.text:
            return collected
        page = fromstring(response.content)
        hrefs = [link.get('href') for link in page.findall(PROJECT_URLS_XPATH)]
        collected.extend(hrefs)
        info('Collected %s urls on page %s', len(hrefs), page_number)
if __name__ == '__main__':
    # datapackage-pipelines processor entry point: pass the datapackage
    # through unchanged and emit one resource of scraped project rows.
    _, datapackage, _ = ingest()
    project_paths = get_project_urls()
    project_rows = scrape_projects(project_paths)
    # spew expects an iterable of resources; each resource iterates rows.
    spew(datapackage, [project_rows])
| {
"repo_name": "Victordeleon/os-data-importers",
"path": "eu-structural-funds/common/processors/MT/mt_malta_scraper.py",
"copies": "1",
"size": "4034",
"license": "mit",
"hash": 4061896130228096000,
"line_mean": 45.367816092,
"line_max": 174,
"alpha_frac": 0.7198810114,
"autogenerated": false,
"ratio": 3.7421150278293136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4961996039229314,
"avg_score": null,
"num_lines": null
} |
"""A scraper for Malta 2007-2013."""
import requests
import lxml
import csv
from lxml import html
__author__ = 'Fernando Blat'
# Base URL is the host of the page.
BASE_URL = 'https://investinginyourfuture.gov.mt'
# Projects are fetched from the paginated AJAX list; the page number is
# appended to this URL.
PAGINATION_URL = 'https://investinginyourfuture.gov.mt/ajax/loadProjects.ashx?page='
def get_text(html_node):
    """Return the node's text, or None when the node itself is None."""
    return None if html_node is None else html_node.text
# XPath for every field, in the exact order the CSV columns are written by
# scrape(): Code, Title, Project Cost, Beneficiary, Line Ministry,
# Start Date, End Date, Non Technical Short Summary Of Project,
# Operational Programme, Fund, Operational Objective, Priority Axis,
# Focus Area Of Intervention, Project Objectives, Project Results,
# Project Purpose.
_FIELD_XPATHS = (
    './/span[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectRefCode"]',
    './/span[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectTitle"]',
    './/span[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectCostBeneficiaryItem_divCostValue"]',
    './/span[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectCostBeneficiaryItem_divBeneficiaryValue"]',
    './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdLineMinistry"]',
    './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdStartDate"]',
    './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdEndDate"]',
    './/div[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_divNonTechnicalShortSummaryContent"]/p',
    './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdOperationalProgramme"]',
    './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdFund"]',
    './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdOperationalObjective"]',
    './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdPriorityAxis"]',
    './/td[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_tdFocusAreaOfIntervention1"]',
    './/div[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_divProjectObjectives"]/p',
    './/div[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_divProjectResults"]/p',
    './/div[@id="mainPlaceHolder_coreContentPlaceHolder_mainContentPlaceHolder_projectDetails_divProjectPurpose"]/p',
)

def scrape_project(url):
    """Return the list of field values scraped from one project page.

    Args:
        url: project path relative to BASE_URL (an href from the listing).

    Returns:
        list of field texts, in _FIELD_XPATHS order; None for any field
        whose element is missing from the page.
    """
    response = requests.get(BASE_URL + url)
    doc = lxml.html.fromstring(response.content)
    # One doc.find() per field preserves the original behaviour: first
    # match only, None when the element is absent.
    return [get_text(doc.find(xpath)) for xpath in _FIELD_XPATHS]
def scrape():
    """Scrape every project into data.csv in the working directory.

    Walks the paginated AJAX listing one page at a time until the server
    answers with an empty body, writing one quoted CSV row per project.
    """
    headers = [
        'Code', 'Title', 'Project Cost', 'Beneficiary', 'Line Ministry', 'Start Date', 'End Date', 'Non Technical Short Summary Of Project', 'Operational Programme', 'Fund', 'Operational Objective',
        'Priority Axis', 'Focus Area Of Intervention', 'Project Objectives', 'Project Results', 'Project Purpose'
    ]
    with open('data.csv', 'w', newline='') as f:
        writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
        writer.writerow(headers)
        page = 1
        while True:
            res = requests.get(PAGINATION_URL + str(page))
            # The last page is signalled by an empty response body.
            if 'Content-Length' in res.headers and res.headers['Content-Length'] == '0':
                print("Exiting...")
                break
            doc = html.fromstring(res.content)
            for link in doc.findall('.//div[@class="project-listing-item-title"]/a'):
                # scrape_project returns the field values in header order.
                project_data_row = scrape_project(link.get('href'))
                writer.writerow(project_data_row)
                # row_dict = dict(zip(headers, project_data_row))
                # info('Scraped row = %s', row_dict)
                # yield row_dict
            page += 1
            print("\n")
def process_resources():
    # NOTE(review): scrape() returns None (it writes data.csv as a side
    # effect), so this generator yields a single None — presumably a
    # leftover from an earlier generator-based version; confirm callers.
    yield scrape()
if __name__ == '__main__':
    # Run standalone: writes data.csv in the current working directory.
    scrape()
| {
"repo_name": "Victordeleon/os-data-importers",
"path": "eu-structural-funds/common/processors/MT/scraper_original.py",
"copies": "1",
"size": "4732",
"license": "mit",
"hash": 4410809753228718000,
"line_mean": 46.32,
"line_max": 198,
"alpha_frac": 0.7115384615,
"autogenerated": false,
"ratio": 3.6882307092751363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9892186051946877,
"avg_score": 0.001516623765651877,
"num_lines": 100
} |
# A *SCRATCH* of a bot that watches YouTube videos through tor using selenium and stem
from stem import Signal
from stem.control import Controller
import stem.process
import time, random, signal, sys
from selenium import webdriver
from selenium.webdriver.common.proxy import *
# Route the browser through the local Tor SOCKS proxy.
myProxy = "localhost:9150"
proxy = Proxy({
    'proxyType': ProxyType.MANUAL,
    'socksProxy': myProxy,
    'noProxy': ''
})

def print_bootstrap_lines(line):
    # Progress callback for the tor launch: echo only bootstrap status lines.
    if "Bootstrapped " in line:
        print(line)

print("Starting Tor:\n")
# Launch a tor process on fixed SOCKS/control ports; the "[TOR.EXE_PATH]"
# placeholder must be replaced with a real tor binary path.
tor_process = stem.process.launch_tor_with_config(
    tor_cmd = "[TOR.EXE_PATH]",
    config = {
        'SocksPort': "9150",
        'ControlPort':"9151"
    },
    init_msg_handler = print_bootstrap_lines,
)
driver = webdriver.Firefox(proxy=proxy)
total_views = 0
try:
    while True:
        # The echo service shows the exit node's view of the request.
        driver.get("https://www.atagar.com/echo.php")
        print(driver.find_element_by_tag_name('body').text)
        driver.get("[YoutTube Video Link]")
        watch_time = random.choice(range(20, 41))
        print("Watching video for: " + str(watch_time) + " seconds")
        time.sleep(watch_time)
        # Request a new Tor circuit and drop cookies before the next pass.
        with Controller.from_port(port = 9151) as controller:
            controller.authenticate()
            controller.signal(Signal.NEWNYM)
        driver.delete_all_cookies()
        total_views += 1
        print("Views: " + str(total_views))
finally:
    print("Exiting...")
    driver.close()
    tor_process.kill()
"repo_name": "iluxonchik/python-general-repo",
"path": "bots/youtube/tortube.py",
"copies": "1",
"size": "1426",
"license": "mit",
"hash": -796480583642586900,
"line_mean": 23.6034482759,
"line_max": 86,
"alpha_frac": 0.6535764376,
"autogenerated": false,
"ratio": 3.4611650485436893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.461474148614369,
"avg_score": null,
"num_lines": null
} |
"""A screensaver version of Newton's Cradle with an interactive mode.
"""
__docformat__ = "reStructuredText"
import os
import random
import sys
# Usage text shown when the script is run without a mode flag.
description = """
---- Newton's Cradle ----
A screensaver version of Newton's Cradle with an interactive mode
/s - Run in fullscreen screensaver mode
/p #### - Display a preview of the screensaver using a window handler
/i - Interactive mode
"""
if len(sys.argv) < 2:
    print(description)
    sys.exit()
is_interactive = False
display_flags = 0
if sys.argv[1] == "/p":  # preview mode: render into the window handle in argv[2]
    os.environ["SDL_VIDEODRIVER"] = "windib"
    os.environ["SDL_WINDOWID"] = sys.argv[2]
    display_size = (100, 100)
    is_interactive = False
### We must set OS env before the pygame imports..
import pygame
if sys.argv[1] == "/s":  # fullscreen screensaver mode
    display_size = (0, 0)
    is_interactive = False
    display_flags = (
        display_flags | pygame.FULLSCREEN
    )  # FULLSCREEN) # | DOUBLEBUF | HWSURFACE )
elif sys.argv[1] == "/i":  # interactive
    display_size = (600, 600)
    is_interactive = True
# NOTE(review): any other first argument leaves display_size undefined, so
# main() would raise NameError — confirm this is acceptable.
import pymunk as pm
from pymunk import Vec2d
def drawcircle(image, colour, origin, radius, width=0):
    """Draw a filled circle (width == 0) or a transparent-centred ring.

    For rings, a temporary per-pixel-alpha surface is drawn and blitted so
    the interior of the outline stays transparent.
    """
    if width == 0:
        pygame.draw.circle(image, colour, origin, int(radius))
        return
    # Clamp huge radii to keep pygame's coordinates in range.
    radius = min(radius, 65534 / 5)
    side = radius * 2 + width
    ring = pygame.Surface([side, side]).convert_alpha()
    ring.fill([0, 0, 0, 0])
    centre = [ring.get_width() / 2, ring.get_height() / 2]
    pygame.draw.circle(ring, colour, centre, radius + (width / 2))
    inner = int(radius - (width / 2))
    if inner > 0:
        # Punch out the middle so only the ring remains.
        pygame.draw.circle(ring, [0, 0, 0, 0], centre, abs(inner))
    image.blit(
        ring,
        [origin[0] - centre[0], origin[1] - centre[1]],
    )
def reset_bodies(space):
    """Return every body to its start position, at rest, and recolour shapes."""
    for b in space.bodies:
        b.position = Vec2d(*b.start_position)
        b.force = 0, 0
        b.torque = 0
        b.velocity = 0, 0
        b.angular_velocity = 0
    # All balls share one randomly generated colour per reset.
    new_colour = pygame.Color(
        random.randint(1, 255),
        random.randint(1, 255),
        random.randint(1, 255),
    )
    for s in space.shapes:
        s.color = new_colour
def main():
    """Build the pymunk cradle scene and run the event/physics/render loop.

    Reads module globals display_size, display_flags and is_interactive,
    which are set by the command-line handling at import time.
    """
    pygame.init()
    screen = pygame.display.set_mode(display_size, display_flags)
    width, height = screen.get_size()

    def to_pygame(p):
        """Small hack to convert pymunk to pygame coordinates"""
        return int(p.x), int(-p.y + height)

    def from_pygame(p):
        return to_pygame(p)

    clock = pygame.time.Clock()
    running = True
    font = pygame.font.Font(None, 16)
    ### Physics stuff
    space = pm.Space()
    space.gravity = (0.0, -1900.0)
    space.damping = 0.999  # to prevent it from blowing up.
    # Kinematic body that tracks the cursor; drag springs attach to it.
    mouse_body = pm.Body(body_type=pm.Body.KINEMATIC)
    bodies = []
    # Five balls, 50px apart, each hung from a pin joint 250px above it.
    for x in range(-100, 150, 50):
        x += width / 2
        offset_y = height / 2
        mass = 10
        radius = 25
        moment = pm.moment_for_circle(mass, 0, radius, (0, 0))
        body = pm.Body(mass, moment)
        body.position = (x, -125 + offset_y)
        body.start_position = Vec2d(*body.position)
        shape = pm.Circle(body, radius)
        shape.elasticity = 0.9999999
        space.add(body, shape)
        bodies.append(body)
        pj = pm.PinJoint(space.static_body, body, (x, 125 + offset_y), (0, 0))
        space.add(pj)
    reset_bodies(space)
    selected = None

    if not is_interactive:
        # Screensaver mode: timers periodically kick the balls and reset them.
        pygame.time.set_timer(pygame.USEREVENT + 1, 70000)  # apply force
        pygame.time.set_timer(pygame.USEREVENT + 2, 120000)  # reset
        pygame.event.post(pygame.event.Event(pygame.USEREVENT + 1))
        pygame.mouse.set_visible(False)

    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_p:
                pygame.image.save(screen, "newtons_cradle.png")
            if event.type == pygame.USEREVENT + 1:
                # Kick a random number (1-4) of the leftmost balls.
                r = random.randint(1, 4)
                for body in bodies[0:r]:
                    body.apply_impulse_at_local_point((-6000, 0))
            if event.type == pygame.USEREVENT + 2:
                reset_bodies(space)
            elif (
                event.type == pygame.KEYDOWN
                and event.key == pygame.K_r
                and is_interactive
            ):
                reset_bodies(space)
            elif (
                event.type == pygame.KEYDOWN
                and event.key == pygame.K_f
                and is_interactive
            ):
                r = random.randint(1, 4)
                for body in bodies[0:r]:
                    body.apply_impulse_at_local_point((-6000, 0))
            elif event.type == pygame.MOUSEBUTTONDOWN and is_interactive:
                # Attach a damped spring between the cursor body and the
                # nearest picked shape so it can be dragged around.
                if selected != None:
                    space.remove(selected)
                p = from_pygame(Vec2d(*event.pos))
                hit = space.point_query_nearest(p, 0, pm.ShapeFilter())
                if hit != None:
                    shape = hit.shape
                    rest_length = mouse_body.position.get_distance(shape.body.position)
                    ds = pm.DampedSpring(
                        mouse_body, shape.body, (0, 0), (0, 0), rest_length, 1000, 10
                    )
                    space.add(ds)
                    selected = ds
            elif event.type == pygame.MOUSEBUTTONUP and is_interactive:
                if selected != None:
                    space.remove(selected)
                    selected = None
            elif event.type == pygame.KEYDOWN:
                running = False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                running = False

        # Keep the kinematic mouse body glued to the cursor position.
        mpos = pygame.mouse.get_pos()
        p = from_pygame(Vec2d(*mpos))
        mouse_body.position = p
        ### Clear screen
        screen.fill(pygame.Color("black"))
        ### Draw stuff
        for c in space.constraints:
            pv1 = c.a.position + c.anchor_a
            pv2 = c.b.position + c.anchor_b
            p1 = to_pygame(pv1)
            p2 = to_pygame(pv2)
            pygame.draw.aalines(screen, pygame.Color("lightgray"), False, [p1, p2])
        for ball in space.shapes:
            p = to_pygame(ball.body.position)
            drawcircle(screen, ball.color, p, int(ball.radius), 0)
            # pygame.draw.circle(screen, ball.color, p, int(ball.radius), 0)
        ### Update physics
        fps = 50
        iterations = 25
        dt = 1.0 / float(fps) / float(iterations)
        for x in range(iterations):  # sub-stepping for a more stable simulation
            space.step(dt)
        ### Flip screen
        if is_interactive:
            screen.blit(
                font.render(
                    "fps: " + str(clock.get_fps()), True, pygame.Color("white")
                ),
                (0, 0),
            )
            screen.blit(
                font.render(
                    "Press left mouse button and drag to interact",
                    True,
                    pygame.Color("darkgrey"),
                ),
                (5, height - 35),
            )
            screen.blit(
                font.render(
                    "Press R to reset, any other key to quit",
                    True,
                    pygame.Color("darkgrey"),
                ),
                (5, height - 20),
            )
        pygame.display.flip()
        clock.tick(fps)
if __name__ == "__main__":
    # Exit status propagates main()'s return value (None -> 0).
    sys.exit(main())
| {
"repo_name": "viblo/pymunk",
"path": "examples/newtons_cradle.py",
"copies": "1",
"size": "7866",
"license": "mit",
"hash": -5273664301102093000,
"line_mean": 29.968503937,
"line_max": 87,
"alpha_frac": 0.5160183066,
"autogenerated": false,
"ratio": 3.7890173410404624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48050356476404626,
"avg_score": null,
"num_lines": null
} |
"""A screensaver version of Newton's Cradle with an interactive mode.
"""
__version__ = "$Id:$"
__docformat__ = "reStructuredText"
import sys, random
import os
# Usage text printed when no mode flag is supplied.
description = """
---- Newton's Cradle ----
A screensaver version of Newton's Cradle with an interactive mode
/s - Run in fullscreen screensaver mode
/p #### - Display a preview of the screensaver using a window handler
/i - Interactive mode
"""
if len(sys.argv) < 2:
    print description
    sys.exit()
is_interactive = False
display_flags = 0
if sys.argv[1] == "/p": # preview mode: render into the window handle in argv[2]
    os.environ['SDL_VIDEODRIVER'] = 'windib'
    os.environ['SDL_WINDOWID'] = sys.argv[2]
    display_size = (100,100)
    is_interactive = False
### We must set OS env before the pygame imports..
import pygame
from pygame.locals import *
from pygame.color import *
if sys.argv[1] == "/s": # fullscreen screensaver mode
    display_size = (0,0)
    is_interactive = False
    display_flags = display_flags | FULLSCREEN # FULLSCREEN) # | DOUBLEBUF | HWSURFACE )
elif sys.argv[1] == "/i": # interactive
    display_size = (600,600)
    is_interactive = True
# NOTE(review): any other first argument leaves display_size undefined, so
# main() would raise NameError — confirm this is acceptable.
import pymunk as pm
from pymunk import Vec2d
def drawcircle(image, colour, origin, radius, width=0):
    """Draw a filled circle (width == 0) or a transparent-centred ring."""
    if width == 0:
        pygame.draw.circle(image, colour, origin, int(radius))
        return
    # Clamp huge radii to keep pygame's coordinates in range.
    if radius > 65534/5:
        radius = 65534/5
    side = radius*2 + width
    ring = pygame.Surface([side, side]).convert_alpha()
    ring.fill([0, 0, 0, 0])
    centre = [ring.get_width()/2, ring.get_height()/2]
    pygame.draw.circle(ring, colour, centre, radius + (width/2))
    if int(radius - (width/2)) > 0:
        # Punch out the middle so only the ring remains.
        pygame.draw.circle(ring, [0, 0, 0, 0], centre, abs(int(radius - (width/2))))
    image.blit(ring, [origin[0] - centre[0], origin[1] - centre[1]])
# NOTE(review): stray debug print of the whole pygame colour table at
# import time — presumably leftover; confirm before removing.
print THECOLORS
def reset_bodies(space):
    """Return every body to its start position, at rest, and recolour shapes."""
    for b in space.bodies:
        b.position = Vec2d(b.start_position)
        b.reset_forces()
        b.velocity = 0, 0
        b.angular_velocity = 0
    # All balls share one randomly picked named colour per reset.
    new_colour = random.choice(THECOLORS.values())
    for s in space.shapes:
        s.color = new_colour
def main():
    """Build the pymunk cradle scene and run the event/physics/render loop.

    Reads module globals display_size, display_flags and is_interactive,
    which are set by the command-line handling at import time.
    """
    pygame.init()
    screen = pygame.display.set_mode(display_size, display_flags)
    width, height = screen.get_size()
    def to_pygame(p):
        """Small hack to convert pymunk to pygame coordinates"""
        return int(p.x), int(-p.y+height)
    def from_pygame(p):
        return to_pygame(p)
    clock = pygame.time.Clock()
    running = True
    font = pygame.font.Font(None, 16)
    ### Physics stuff
    space = pm.Space(iterations = 1)
    space.gravity = (0.0, -1900.0)
    space.damping = 0.999 # to prevent it from blowing up.
    static_body = pm.Body()
    # Body that tracks the cursor; drag springs attach to it.
    mouse_body = pm.Body()
    bodies = []
    # Five balls, 50px apart, each hung from a pin joint 250px above it.
    for x in range(-100,150,50):
        x += width / 2
        offset_y = height/2
        mass = 10
        radius = 25
        moment = pm.moment_for_circle(mass, 0, radius, (0,0))
        body = pm.Body(mass, moment)
        body.position = (x,-125+offset_y)
        body.start_position = Vec2d(body.position)
        shape = pm.Circle(body, radius)
        shape.elasticity = 0.9999999
        space.add(body, shape)
        bodies.append(body)
        pj = pm.PinJoint(static_body, body, (x,125+offset_y), (0,0))
        space.add(pj)
    reset_bodies(space)
    selected = None
    if not is_interactive:
        # Screensaver mode: timers periodically kick the balls and reset them.
        pygame.time.set_timer(USEREVENT+1, 70000) # apply force
        pygame.time.set_timer(USEREVENT+2, 120000) # reset
        pygame.event.post(pygame.event.Event(USEREVENT+1))
        pygame.mouse.set_visible(False)
    while running:
        for event in pygame.event.get():
            if event.type == QUIT:
                running = False
            elif event.type == KEYDOWN and event.key == K_p:
                pygame.image.save(screen, "newtons_cradle.png")
            if event.type == pygame.USEREVENT+1:
                # Kick a random number (1-4) of the leftmost balls.
                r = random.randint(1,4)
                for body in bodies[0:r]:
                    body.apply_impulse((-6000,0))
            if event.type == pygame.USEREVENT+2:
                reset_bodies(space)
            elif event.type == KEYDOWN and event.key == K_r and is_interactive:
                reset_bodies(space)
            elif event.type == KEYDOWN and event.key == K_f and is_interactive:
                r = random.randint(1,4)
                for body in bodies[0:r]:
                    body.apply_impulse((-6000,0))
            elif event.type == MOUSEBUTTONDOWN and is_interactive:
                # Attach a damped spring between the cursor body and the
                # clicked shape so it can be dragged around.
                if selected != None:
                    space.remove(selected)
                p = from_pygame(Vec2d(event.pos))
                shape = space.point_query_first(p)
                if shape != None:
                    rest_length = mouse_body.position.get_distance(shape.body.position)
                    ds = pm.DampedSpring(mouse_body, shape.body, (0,0), (0,0), rest_length, 1000, 10)
                    space.add(ds)
                    selected = ds
            elif event.type == MOUSEBUTTONUP and is_interactive:
                if selected != None:
                    space.remove(selected)
                    selected = None
            elif event.type == KEYDOWN:
                running = False
            elif event.type == MOUSEBUTTONDOWN:
                running = False
        # Keep the mouse body glued to the cursor position.
        mpos = pygame.mouse.get_pos()
        p = from_pygame( Vec2d(mpos) )
        mouse_body.position = p
        ### Clear screen
        screen.fill(THECOLORS["black"])
        ### Draw stuff
        for c in space.constraints:
            pv1 = c.a.position + c.anchr1
            pv2 = c.b.position + c.anchr2
            p1 = to_pygame(pv1)
            p2 = to_pygame(pv2)
            pygame.draw.aalines(screen, THECOLORS["lightgray"], False, [p1,p2])
        for ball in space.shapes:
            p = to_pygame(ball.body.position)
            drawcircle(screen, ball.color, p, int(ball.radius), 0)
            #pygame.draw.circle(screen, ball.color, p, int(ball.radius), 0)
        ### Update physics
        fps = 50
        iterations = 25
        dt = 1.0/float(fps)/float(iterations)
        for x in range(iterations): # sub-stepping for a more stable simulation
            space.step(dt)
        ### Flip screen
        if is_interactive:
            screen.blit(font.render("fps: " + str(clock.get_fps()), 1, THECOLORS["white"]), (0,0))
            screen.blit(font.render("Press left mouse button and drag to interact", 1, THECOLORS["darkgrey"]), (5,height - 35))
            screen.blit(font.render("Press R to reset, any other key to quit", 1, THECOLORS["darkgrey"]), (5,height - 20))
        pygame.display.flip()
        clock.tick(fps)
if __name__ == '__main__':
    # Exit status propagates main()'s return value (None -> 0).
    sys.exit(main())
| {
"repo_name": "sneharavi12/DeepLearningFinals",
"path": "pymunk-pymunk-4.0.0/examples/newtons_cradle.py",
"copies": "5",
"size": "7014",
"license": "mit",
"hash": -5845869171582654000,
"line_mean": 33.8955223881,
"line_max": 127,
"alpha_frac": 0.5583119475,
"autogenerated": false,
"ratio": 3.6398546964193046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6698166643919304,
"avg_score": null,
"num_lines": null
} |
"""A screensaver version of Newton's Cradle with an interactive mode.
"""
__version__ = "$Id:$"
__docformat__ = "reStructuredText"
import sys, random
import os
# Usage text printed when no mode flag is supplied.
description = """
---- Newton's Cradle ----
A screensaver version of Newton's Cradle with an interactive mode
/s - Run in fullscreen screensaver mode
/p #### - Display a preview of the screensaver using a window handler
/i - Interactive mode
"""
if len(sys.argv) < 2:
    print description
    sys.exit()
is_interactive = False
display_flags = 0
if sys.argv[1] == "/p": # preview mode: render into the window handle in argv[2]
    os.environ['SDL_VIDEODRIVER'] = 'windib'
    os.environ['SDL_WINDOWID'] = sys.argv[2]
    display_size = (100,100)
    is_interactive = False
### We must set OS env before the pygame imports..
import pygame
from pygame.locals import *
from pygame.color import *
if sys.argv[1] == "/s": # fullscreen screensaver mode
    display_size = (0,0)
    is_interactive = False
    display_flags = display_flags | FULLSCREEN # FULLSCREEN) # | DOUBLEBUF | HWSURFACE )
elif sys.argv[1] == "/i": # interactive
    display_size = (600,600)
    is_interactive = True
# NOTE(review): any other first argument leaves display_size undefined, so
# main() would raise NameError — confirm this is acceptable.
import pymunk as pm
from pymunk import Vec2d
def drawcircle(image, colour, origin, radius, width=0):
    """Draw a filled circle (width == 0) or a transparent-centred ring."""
    if width == 0:
        pygame.draw.circle(image, colour, origin, int(radius))
        return
    # Clamp huge radii to keep pygame's coordinates in range.
    if radius > 65534/5:
        radius = 65534/5
    side = radius*2 + width
    ring = pygame.Surface([side, side]).convert_alpha()
    ring.fill([0, 0, 0, 0])
    centre = [ring.get_width()/2, ring.get_height()/2]
    pygame.draw.circle(ring, colour, centre, radius + (width/2))
    if int(radius - (width/2)) > 0:
        # Punch out the middle so only the ring remains.
        pygame.draw.circle(ring, [0, 0, 0, 0], centre, abs(int(radius - (width/2))))
    image.blit(ring, [origin[0] - centre[0], origin[1] - centre[1]])
# NOTE(review): stray debug print of the whole pygame colour table at
# import time — presumably leftover; confirm before removing.
print THECOLORS
def reset_bodies(space):
    """Return every body to its start position, at rest, and recolour shapes."""
    for b in space.bodies:
        b.position = Vec2d(b.start_position)
        b.reset_forces()
        b.velocity = 0, 0
        b.angular_velocity = 0
    # All balls share one randomly picked named colour per reset.
    new_colour = random.choice(THECOLORS.values())
    for s in space.shapes:
        s.color = new_colour
def main():
    """Build the pymunk cradle scene and run the event/physics/render loop.

    Reads module globals display_size, display_flags and is_interactive,
    which are set by the command-line handling at import time.
    """
    pygame.init()
    screen = pygame.display.set_mode(display_size, display_flags)
    width, height = screen.get_size()
    def to_pygame(p):
        """Small hack to convert pymunk to pygame coordinates"""
        return int(p.x), int(-p.y+height)
    def from_pygame(p):
        return to_pygame(p)
    clock = pygame.time.Clock()
    running = True
    font = pygame.font.Font(None, 16)
    ### Physics stuff
    space = pm.Space(iterations = 1)
    space.gravity = (0.0, -1900.0)
    space.damping = 0.999 # to prevent it from blowing up.
    static_body = pm.Body()
    # Body that tracks the cursor; drag springs attach to it.
    mouse_body = pm.Body()
    bodies = []
    # Five balls, 50px apart, each hung from a pin joint 250px above it.
    for x in range(-100,150,50):
        x += width / 2
        offset_y = height/2
        mass = 10
        radius = 25
        moment = pm.moment_for_circle(mass, 0, radius, (0,0))
        body = pm.Body(mass, moment)
        body.position = (x,-125+offset_y)
        body.start_position = Vec2d(body.position)
        shape = pm.Circle(body, radius)
        shape.elasticity = 0.9999999
        space.add(body, shape)
        bodies.append(body)
        pj = pm.PinJoint(static_body, body, (x,125+offset_y), (0,0))
        space.add(pj)
    reset_bodies(space)
    selected = None
    if not is_interactive:
        # Screensaver mode: timers periodically kick the balls and reset them.
        pygame.time.set_timer(USEREVENT+1, 70000) # apply force
        pygame.time.set_timer(USEREVENT+2, 120000) # reset
        pygame.event.post(pygame.event.Event(USEREVENT+1))
        pygame.mouse.set_visible(False)
    while running:
        for event in pygame.event.get():
            if event.type == QUIT:
                running = False
            elif event.type == KEYDOWN and event.key == K_p:
                pygame.image.save(screen, "newtons_cradle.png")
            if event.type == pygame.USEREVENT+1:
                # Kick a random number (1-4) of the leftmost balls.
                r = random.randint(1,4)
                for body in bodies[0:r]:
                    body.apply_impulse((-6000,0))
            if event.type == pygame.USEREVENT+2:
                reset_bodies(space)
            elif event.type == KEYDOWN and event.key == K_r and is_interactive:
                reset_bodies(space)
            elif event.type == KEYDOWN and event.key == K_f and is_interactive:
                r = random.randint(1,4)
                for body in bodies[0:r]:
                    body.apply_impulse((-6000,0))
            elif event.type == MOUSEBUTTONDOWN and is_interactive:
                # Attach a damped spring between the cursor body and the
                # clicked shape so it can be dragged around.
                if selected != None:
                    space.remove(selected)
                p = from_pygame(Vec2d(event.pos))
                shape = space.point_query_first(p)
                if shape != None:
                    rest_length = mouse_body.position.get_distance(shape.body.position)
                    ds = pm.DampedSpring(mouse_body, shape.body, (0,0), (0,0), rest_length, 1000, 10)
                    space.add(ds)
                    selected = ds
            elif event.type == MOUSEBUTTONUP and is_interactive:
                if selected != None:
                    space.remove(selected)
                    selected = None
            elif event.type == KEYDOWN:
                running = False
            elif event.type == MOUSEBUTTONDOWN:
                running = False
        # Keep the mouse body glued to the cursor position.
        mpos = pygame.mouse.get_pos()
        p = from_pygame( Vec2d(mpos) )
        mouse_body.position = p
        ### Clear screen
        screen.fill(THECOLORS["black"])
        ### Draw stuff
        for c in space.constraints:
            pv1 = c.a.position + c.anchr1
            pv2 = c.b.position + c.anchr2
            p1 = to_pygame(pv1)
            p2 = to_pygame(pv2)
            pygame.draw.aalines(screen, THECOLORS["lightgray"], False, [p1,p2])
        for ball in space.shapes:
            p = to_pygame(ball.body.position)
            drawcircle(screen, ball.color, p, int(ball.radius), 0)
            #pygame.draw.circle(screen, ball.color, p, int(ball.radius), 0)
        ### Update physics
        fps = 50
        iterations = 25
        dt = 1.0/float(fps)/float(iterations)
        for x in range(iterations): # sub-stepping for a more stable simulation
            space.step(dt)
        ### Flip screen
        if is_interactive:
            screen.blit(font.render("fps: " + str(clock.get_fps()), 1, THECOLORS["white"]), (0,0))
            screen.blit(font.render("Press left mouse button and drag to interact", 1, THECOLORS["darkgrey"]), (5,height - 35))
            screen.blit(font.render("Press R to reset, any other key to quit", 1, THECOLORS["darkgrey"]), (5,height - 20))
        pygame.display.flip()
        clock.tick(fps)
if __name__ == '__main__':
    # Exit status propagates main()'s return value (None -> 0).
    sys.exit(main())
| {
"repo_name": "cfobel/python___pymunk",
"path": "examples/newtons_cradle.py",
"copies": "1",
"size": "7215",
"license": "mit",
"hash": 3893974385874662000,
"line_mean": 33.8955223881,
"line_max": 127,
"alpha_frac": 0.5427581428,
"autogenerated": false,
"ratio": 3.6848825331971398,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47276406759971396,
"avg_score": null,
"num_lines": null
} |
""" A script for adding visits to the same patient
This is a manage.py command. Run with --help for documentation.
Example usage:
To run on localhost:
> python manage.py addmultiplevisits
To run on production:
> python manage.py addmultiplevisits --remote
"""
import getpass
import logging
import settings
import datetime
from django.core.management.base import BaseCommand, CommandError
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.ext import db
from optparse import make_option
from healthdb import models
def auth_func():
    """Prompt for credentials, used by the remote API stub for localhost access."""
    username = raw_input('Username:')
    password = getpass.getpass('Password:')
    return username, password
# Number of Visit entities fetched (and written back) per datastore batch.
ROWS_PER_BATCH = 50
def run():
    """Walk all Visit entities in key order, assigning missing short strings.

    Fetches ROWS_PER_BATCH entities at a time, paginating on __key__, and
    writes back only the visits that were actually changed.
    """
    processed = 0
    batch = models.Visit.all().order('__key__').fetch(ROWS_PER_BATCH)
    while batch:
        changed = [v for v in batch if not v.short_string]
        for visit in changed:
            visit.assign_short_string()
        db.put(changed)
        processed += len(batch)
        logging.info('Updated %d visits' % processed)
        last_key = batch[-1].key()
        batch = models.Visit.all().order('__key__').filter('__key__ >', last_key).fetch(ROWS_PER_BATCH)
# NOTE(review): this Command definition is immediately shadowed by the
# second, near-identical Command class below, so it is dead code — confirm
# and delete one of the two.
class Command(BaseCommand):
    option_list = BaseCommand.option_list + (
        make_option('--app-id', dest='app_id', help='The app id'),
        make_option('--host', dest='host', default='localhost:8080',
                    help='Specifies the URL of the local application. Use -- remote '
                    'to modify the production site.'),
    )
    help = 'Adds multiple visits to a patient'
    args = ''
class Command(BaseCommand):
    # NOTE(review): this class shadows the nearly identical Command defined
    # just above it; the earlier definition is dead code.
    option_list = BaseCommand.option_list + (
        make_option('--app-id', dest='app_id', help='The app id'),
        make_option('--host', dest='host', default='localhost:8080',
                    help='Specifies the URL of the local application. Use -- remote '
                    'to modify the production site.'),
    )
    help = 'add multiple visits to a patient'
    args = ''
    def handle(self, *app_labels, **options):
        """manage.py entry point: configure the datastore stub, then run()."""
        logging.getLogger().setLevel(logging.INFO)
        if len(app_labels) != 0:
            raise CommandError("This command doesn't take a list of parameters"
                               "...it only runs against the 'childdb' app.")
        app_id = options.get('app_id')
        # NOTE(review): no '--remote' option is declared in option_list, so
        # options.get('remote') is always None and the branch below always
        # runs — the documented --remote flag appears unimplemented; confirm.
        remote = options.get('remote')
        if not remote:
            remote_api_url = settings.DATABASE_OPTIONS['remote_url']
            host = options.get('host')
            remote_api_stub.ConfigureRemoteDatastore(
                app_id, remote_api_url, auth_func, host)
        run()
| {
"repo_name": "avastjohn/maventy_new",
"path": "healthdb/management/commands/addvisitshortstring.py",
"copies": "1",
"size": "2548",
"license": "bsd-3-clause",
"hash": -5677069154794431000,
"line_mean": 30.85,
"line_max": 108,
"alpha_frac": 0.6609105181,
"autogenerated": false,
"ratio": 3.6714697406340058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4832380258734006,
"avg_score": null,
"num_lines": null
} |
# A script for analyzing the output of NPSPY and merging data about streams.
import sys
def ReadFile(filename, flags='rb'):
    """Returns the contents of a file.

    Args:
        filename: path of the file to read.
        flags: mode string passed to open(); defaults to binary read.
    """
    # Context manager guarantees the handle is closed even if read()
    # raises (the original leaked the handle on error).
    with open(filename, flags) as f:
        return f.read()
def WriteFile(filename, contents):
    """Overwrites the file with the given contents.

    Args:
        filename: path of the file to (re)create.
        contents: string written as the file's entire contents.
    """
    # Context manager closes (and flushes) the file even if write() raises.
    with open(filename, 'w') as f:
        f.write(contents)
# sample line: 'NPP_NewStream(0x645c898, 0x56ba900("application/x-shockwave-flash"), 0x64bb3b0 (http://weeklyad.target.com/target/flash/target/target.swf?ver=090326), TRUE, NP_NORMAL)'
class Stream:
    """One plugin stream parsed from an NPP_NewStream log line.

    NOTE(review): Python 2 script (print statements); run under python2.
    """

    def __init__(self, line):
        # Fields are comma-space separated; see the sample line above the class.
        split = line.split(', ')
        self.mime_type = split[1].split('"')[1]
        self.url = split[2].split(' ')[1].strip('()')
        self.seekable = split[3]
        self.type = split[4].strip(')')
        self.size = 0    # accumulated from NPP_Write entries in main()
        self.status = '' # filled in by the NPP_DestroyStream handler in main()
        try:
            self.address = split[2].split(' ')[0]
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt; kept as-is.
            print 'parsing error on ' + line
            self.address = ''
        if self.type != 'NP_NORMAL':
            print 'line got unexpected type: ' + line
def main(argv=None):
    """Parse an NPSPY log file (argv[1]) and write a summary file with one
    'url, mime_type, size, seekable' row per stream.

    NOTE(review): Python 2 script (print statements); run under python2.
    """
    if argv is None:
        argv = sys.argv
    streams = []
    if len(argv) != 2:
        print 'need filename'
        return
    file = ReadFile(argv[1])
    for line in file.splitlines():
        if line.startswith('NPP_NewStream('):
            # Malformed NewStream lines (fewer than 3 '(') are skipped.
            if line.count('(') < 3:
                print 'unknown format for line: ' + line
                continue
            s = Stream(line)
            streams.append(s)
        elif line.startswith('NPP_Write('):
            # sample: NPP_Write(0x645c898, 0x64bb3b0, 0, 16384, 0x56c1000("CW")))
            split = line.split(', ')
            address = split[1]
            start = int(split[2])
            size = int(split[3])
            # Match the write to its stream by address and accumulate the size.
            found = False
            for stream in streams:
                if stream.address == address:
                    if stream.size != start:
                        print 'error: starting at wrong place for write ' + stream.url + ' ' + str(stream.size) + ' ' + str(start)
                    stream.size += size
                    found = True
                    break
            if not found:
                print "couldn't find stream to match NPP_Write " + line
        elif line.startswith('NPP_DestroyStream('):
            # sample: NPP_DestroyStream(0x645c898, 0x64bb3b0, NPRES_DONE)
            split = line.split(', ')
            address = split[1]
            status = split[2].strip(')')
            found = False
            for stream in streams:
                if stream.address == address:
                    stream.status = status
                    stream.address = '' # address can be reused
                    found = True
                    break
            if not found:
                print "couldn't find stream to match NPP_DestroyStream " + line
    output = []
    for stream in streams:
        if stream.status != 'NPRES_DONE':
            print 'error: no NPP_DestroyStream with success for ' + stream.url + ' ' + stream.status + '.'
        output.append(', '.join([stream.url, stream.mime_type, str(stream.size), stream.seekable]))
    # NOTE(review): str.replace substitutes EVERY '.', so 'a.b.log' becomes
    # 'a_analyzed.b_analyzed.log' — presumably only the extension dot was
    # intended; confirm before relying on the output name.
    output_file = argv[1].replace('.', '_analyzed.')
    WriteFile(output_file, '\n'.join(output))

if __name__ == "__main__":
    sys.exit(main())
| {
"repo_name": "7kbird/chrome",
"path": "third_party/npapi/npspy/analyze_streams.py",
"copies": "1",
"size": "3057",
"license": "bsd-3-clause",
"hash": 2732773048278202000,
"line_mean": 28.1142857143,
"line_max": 184,
"alpha_frac": 0.5904481518,
"autogenerated": false,
"ratio": 3.485746864310148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9370049891967139,
"avg_score": 0.041229024828601676,
"num_lines": 105
} |
# A script for analyzing the output of NPSPY and merging data about streams.
import sys
def ReadFile(filename, flags='rb'):
    """Read *filename* in the given mode and return its full contents."""
    handle = open(filename, flags)
    contents = handle.read()
    handle.close()
    return contents
def WriteFile(filename, contents):
    """Replace *filename* on disk with *contents* (text mode)."""
    out = open(filename, 'w')
    out.write(contents)
    out.close()
# sample line: 'NPP_NewStream(0x645c898, 0x56ba900("application/x-shockwave-flash"), 0x64bb3b0 (http://weeklyad.target.com/target/flash/target/target.swf?ver=090326), TRUE, NP_NORMAL)'
class Stream:
    """A single plugin stream, built from one NPP_NewStream log line.

    NOTE(review): this file is Python 2 (print statements).
    """

    def __init__(self, line):
        # The log line's fields are separated by ', '; see sample above.
        split = line.split(', ')
        self.mime_type = split[1].split('"')[1]
        self.url = split[2].split(' ')[1].strip('()')
        self.seekable = split[3]
        self.type = split[4].strip(')')
        self.size = 0    # running byte total, updated by NPP_Write handling
        self.status = '' # final status, set by NPP_DestroyStream handling
        try:
            self.address = split[2].split(' ')[0]
        except:
            # NOTE(review): bare except kept as in the original.
            print 'parsing error on ' + line
            self.address = ''
        if self.type != 'NP_NORMAL':
            print 'line got unexpected type: ' + line
def main(argv=None):
    """Merge NPP_NewStream/NPP_Write/NPP_DestroyStream log entries and write
    one 'url, mime_type, size, seekable' line per stream to an output file.

    NOTE(review): Python 2 script; run under python2.
    """
    if argv is None:
        argv = sys.argv
    streams = []
    if len(argv) != 2:
        print 'need filename'
        return
    file = ReadFile(argv[1])
    for line in file.splitlines():
        if line.startswith('NPP_NewStream('):
            # Skip lines that don't carry all three parenthesized fields.
            if line.count('(') < 3:
                print 'unknown format for line: ' + line
                continue
            s = Stream(line)
            streams.append(s)
        elif line.startswith('NPP_Write('):
            # sample: NPP_Write(0x645c898, 0x64bb3b0, 0, 16384, 0x56c1000("CW")))
            split = line.split(', ')
            address = split[1]
            start = int(split[2])
            size = int(split[3])
            # Attribute this write to the stream with the matching address.
            found = False
            for stream in streams:
                if stream.address == address:
                    if stream.size != start:
                        print 'error: starting at wrong place for write ' + stream.url + ' ' + str(stream.size) + ' ' + str(start)
                    stream.size += size
                    found = True
                    break
            if not found:
                print "couldn't find stream to match NPP_Write " + line
        elif line.startswith('NPP_DestroyStream('):
            # sample: NPP_DestroyStream(0x645c898, 0x64bb3b0, NPRES_DONE)
            split = line.split(', ')
            address = split[1]
            status = split[2].strip(')')
            found = False
            for stream in streams:
                if stream.address == address:
                    stream.status = status
                    stream.address = '' # address can be reused
                    found = True
                    break
            if not found:
                print "couldn't find stream to match NPP_DestroyStream " + line
    output = []
    for stream in streams:
        if stream.status != 'NPRES_DONE':
            print 'error: no NPP_DestroyStream with success for ' + stream.url + ' ' + stream.status + '.'
        output.append(', '.join([stream.url, stream.mime_type, str(stream.size), stream.seekable]))
    # NOTE(review): replaces every '.', not just the extension separator.
    output_file = argv[1].replace('.', '_analyzed.')
    WriteFile(output_file, '\n'.join(output))

if __name__ == "__main__":
    sys.exit(main())
| {
"repo_name": "BigBrother1984/android_external_chromium_org",
"path": "third_party/npapi/npspy/analyze_streams.py",
"copies": "127",
"size": "3162",
"license": "bsd-3-clause",
"hash": -7815812489917302000,
"line_mean": 28.1142857143,
"line_max": 184,
"alpha_frac": 0.5708412397,
"autogenerated": false,
"ratio": 3.5728813559322035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
""" A script for calculating cached latest_visit statistics on patients.
This is a manage.py command. Run with --help for documentation.
Example usage:
To run on localhost:
> manage.py visitcalc
To run on production:
> manage.py visitcalc --remote
"""
import getpass
import logging
import settings
from django.core.management.base import BaseCommand, CommandError
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.ext import db
from optparse import make_option
from healthdb import models
def auth_func():
    """Get username and password (for access to localhost)"""
    # raw_input is Python 2; this management command targets the legacy
    # Python 2 App Engine runtime.
    return raw_input('Username:'), getpass.getpass('Password:')
# Number of rows to read/write at once
ROWS_PER_BATCH = 200

class LoadError(Exception):
    """Error raised when loading/processing patient rows fails.

    Fixed: the original declared ``class LoadError():`` without inheriting
    from Exception, so instances could not be raised (TypeError in Python 3,
    old-style class in Python 2).
    """
    def __init__(self, msg):
        # Keep the original attribute name so existing readers of .msg work.
        self.msg = msg
    def __str__(self):
        return self.msg
def set_patient_visit_stats():
    '''Set latest_visit statistics on Patient instances.

    Walks every Patient in key order, ROWS_PER_BATCH at a time, forcing a
    recompute of the cached latest-visit data and writing back only the
    entities that changed.

    NOTE: This is not accurate if data is added while this is
    happening, but it's not a problem for now.
    '''
    # First page of patients, ordered by key so paging is stable.
    pats = models.Patient.all().order('__key__').fetch(ROWS_PER_BATCH)
    count = 0
    while pats:
        pats_to_put = []
        for pat in pats:
            try:
                # put=False: batch the datastore writes below instead.
                if pat.set_latest_visit(force=True, put = False):
                    pats_to_put.append(pat)
            # Python 2 except syntax; this file targets the py2 runtime.
            except TypeError, err:
                logging.info('Skip patient %s: %s' % (pat.short_string, err))
        db.put(pats_to_put)
        count += len(pats)
        logging.info('Set %d visit caches' % count)
        # Next page: everything strictly after the last key we saw.
        pats = models.Patient.all().order('__key__').filter(
            '__key__ >', pats[-1].key()).fetch(ROWS_PER_BATCH)
class Command(BaseCommand):
    """manage.py command that recomputes cached latest-visit statistics.

    Connects to a dev server through the App Engine remote API (unless
    --remote targets production) and then runs set_patient_visit_stats().
    """

    option_list = BaseCommand.option_list + (
        make_option('--app-id', dest='app_id', help='The app id'),
        make_option('--host', dest='host', default='localhost:8080',
                    help='Specifies the URL of the local application. Use -- remote '
                    'to modify the production site.'),
        )
    help = 'Sets counts'
    args = ''

    def handle(self, *app_labels, **options):
        # Silence the copious DEBUG logging from the remote API stub.
        logging.getLogger().setLevel(logging.INFO)

        # Positional arguments are not accepted by this command.
        if app_labels:
            raise CommandError("This command doesn't take a list of parameters"
                               "...it only runs against the 'childdb' app.")

        app_id = options.get('app_id')
        if not app_id:
            raise CommandError('Must give --app-id')

        # Falsy --remote means: point the datastore stub at the local server.
        # TODO(max): I couldn't get this to run against the correct local
        # instance of the datastore, so we'll connect this way. It remains
        # a TODO to just run this script directly, without this block.
        use_production = options.get('remote')
        if not use_production:
            remote_api_url = settings.DATABASE_OPTIONS['remote_url']
            local_host = options.get('host')
            remote_api_stub.ConfigureRemoteDatastore(
                "childdb", remote_api_url, auth_func, local_host)

        set_patient_visit_stats()
| {
"repo_name": "avastjohn/maventy_new",
"path": "healthdb/management/commands/visitcalc.py",
"copies": "1",
"size": "3112",
"license": "bsd-3-clause",
"hash": -5553732034112976000,
"line_mean": 29.4343434343,
"line_max": 75,
"alpha_frac": 0.6433161954,
"autogenerated": false,
"ratio": 3.762998790810157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4906314986210157,
"avg_score": null,
"num_lines": null
} |
"""A script for converting the EPA CEMS dataset from gzip to Apache Parquet.
The original EPA CEMS data is available as ~12,000 gzipped CSV files, one for
each month for each state, from 1995 to the present. On disk they take up
about 7.3 GB of space, compressed. Uncompressed it is closer to 100 GB. That's
too much data to work with in memory.
Apache Parquet is a compressed, columnar datastore format, widely used in Big
Data applications. It's an open standard, and is very fast to read from disk.
It works especially well with both `Dask dataframes <https://dask.org/>`__ (a
parallel / distributed computing extension of pandas) and Apache Spark (a cloud
based Big Data processing pipeline system.)
Since pulling 100 GB of data into SQLite takes a long time, and working with
that data en masse isn't particularly pleasant on a laptop, this script can be
used to convert the original EPA CEMS data to the more widely usable Apache
Parquet format for use with Dask, either on a multi-core workstation or in an
interactive cloud computing environment like `Pangeo <https://pangeo.io>`__.
"""
import argparse
import logging
import pathlib
import sys
from functools import partial
import coloredlogs
import pandas as pd
import pyarrow as pa
from pyarrow import parquet as pq
import pudl
from pudl import constants as pc
logger = logging.getLogger(__name__)
def create_in_dtypes():
    """
    Create a dictionary of input data types.

    This specifies the dtypes of the input columns, which is necessary for some
    cases where, e.g., a column is always NaN.

    Returns:
        dict: mapping columns names to :mod:`pandas` data types.
    """
    # These measurement codes are used by all four of our measurement variables
    common_codes = (
        "LME",
        "Measured",
        "Measured and Substitute",
        "Other",
        "Substitute",
        "Undetermined",
        "Unknown Code",
        "",
    )
    co2_so2_cats = pd.CategoricalDtype(categories=common_codes, ordered=False)
    nox_cats = pd.CategoricalDtype(
        categories=common_codes + ("Calculated",), ordered=False)
    state_cats = pd.CategoricalDtype(
        categories=pc.cems_states.keys(), ordered=False)

    # All raw measurement columns are downcast to float32.
    in_dtypes = dict.fromkeys(
        [
            "operating_time_hours",
            "gross_load_mw",
            "steam_load_1000_lbs",
            "so2_mass_lbs",
            "nox_rate_lbs_mmbtu",
            "nox_mass_lbs",
            "co2_mass_tons",
            "heat_content_mmbtu",
        ],
        "float32",
    )
    # Identifier and measurement-code columns get their specific dtypes.
    in_dtypes.update({
        "state": state_cats,
        "plant_id_eia": "int32",
        "unitid": pd.StringDtype(),
        "so2_mass_measurement_code": co2_so2_cats,
        "nox_rate_measurement_code": nox_cats,
        "nox_mass_measurement_code": nox_cats,
        "co2_mass_measurement_code": co2_so2_cats,
        "facility_id": pd.Int32Dtype(),
        "unit_id_epa": pd.Int32Dtype(),
    })
    return in_dtypes
def create_cems_schema():
    """Make an explicit Arrow schema for the EPA CEMS data.

    Make changes in the types of the generated parquet files by editing this
    function.

    Note that parquet's internal representation doesn't use unsigned numbers or
    16-bit ints, so just keep things simple here and always use int32 and
    float32.

    Returns:
        pyarrow.schema: An Arrow schema for the EPA CEMS data.
    """
    # Each partial binds the Arrow type/nullability once; the calls below only
    # supply the field name, keeping the schema listing compact.
    int_nullable = partial(pa.field, type=pa.int32(), nullable=True)
    int_not_null = partial(pa.field, type=pa.int32(), nullable=False)
    str_not_null = partial(pa.field, type=pa.string(), nullable=False)
    # Timestamp resolution is hourly, but second is the largest allowed.
    timestamp = partial(pa.field, type=pa.timestamp("s", tz="UTC"), nullable=False)
    float_nullable = partial(pa.field, type=pa.float32(), nullable=True)
    float_not_null = partial(pa.field, type=pa.float32(), nullable=False)
    # (float32 can accurately hold integers up to 16,777,216 so no need for
    # float64)
    # Measurement-code columns are dictionary-encoded (int8 -> string).
    dict_nullable = partial(
        pa.field,
        type=pa.dictionary(pa.int8(), pa.string(), ordered=False),
        nullable=True
    )
    # Field order here defines the column order of the output Parquet files.
    return pa.schema([
        dict_nullable("state"),
        int_not_null("plant_id_eia"),
        str_not_null("unitid"),
        timestamp("operating_datetime_utc"),
        float_nullable("operating_time_hours"),
        float_not_null("gross_load_mw"),
        float_nullable("steam_load_1000_lbs"),
        float_nullable("so2_mass_lbs"),
        dict_nullable("so2_mass_measurement_code"),
        float_nullable("nox_rate_lbs_mmbtu"),
        dict_nullable("nox_rate_measurement_code"),
        float_nullable("nox_mass_lbs"),
        dict_nullable("nox_mass_measurement_code"),
        float_nullable("co2_mass_tons"),
        dict_nullable("co2_mass_measurement_code"),
        float_not_null("heat_content_mmbtu"),
        int_nullable("facility_id"),
        int_nullable("unit_id_epa"),
        int_not_null("year"),
    ])
def epacems_to_parquet(datapkg_path,
                       epacems_years,
                       epacems_states,
                       out_dir,
                       compression='snappy',
                       partition_cols=('year', 'state'),
                       clobber=False):
    """Take transformed EPA CEMS dataframes and output them as Parquet files.

    We need to do a few additional manipulations of the dataframes after they
    have been transformed by PUDL to get them ready for output to the Apache
    Parquet format. Mostly this has to do with ensuring homogeneous data types
    across all of the dataframes, and downcasting to the most efficient data
    type possible for each of them. We also add a 'year' column so that we can
    partition the dataset on disk by year as well as state.

    (Year partitions follow the CEMS input data, based on local plant time.
    The operating_datetime_utc identifies time in UTC, so there's a mismatch
    of a few hours on December 31 / January 1.)

    Args:
        datapkg_path (path-like): Path to the datapackage.json file describing
            the datapackage containing the EPA CEMS data to be converted.
        epacems_years (list): list of years from which we are trying to read
            CEMS data
        epacems_states (list): list of states from which we are trying to read
            CEMS data
        out_dir (path-like): The directory in which to output the Parquet files
        compression (string): Parquet compression codec, passed through to
            :func:`pyarrow.parquet.write_to_dataset` (e.g. 'snappy', 'gzip').
        partition_cols (tuple): column names used to partition the on-disk
            dataset; defaults to ('year', 'state').
        clobber (bool): If True and there is already a directory with out_dirs
            name, the existing parquet files will be deleted and new ones will
            be generated in their place.

    Raises:
        AssertionError: Raised if an output directory is not specified.
        FileNotFoundError: If a requested year/state CSV is missing on disk.
    """
    if not out_dir:
        raise AssertionError("Required output directory not specified.")
    out_dir = pudl.helpers.prep_dir(out_dir, clobber=clobber)
    data_dir = pathlib.Path(datapkg_path).parent / "data"
    # Verify that all the requested data files are present:
    epacems_years = list(epacems_years)
    epacems_years.sort()
    epacems_states = list(epacems_states)
    epacems_states.sort()
    for year in epacems_years:
        for state in epacems_states:
            newpath = pathlib.Path(
                data_dir,
                f"hourly_emissions_epacems_{year}_{state.lower()}.csv.gz")
            if not newpath.is_file():
                raise FileNotFoundError(f"EPA CEMS file not found: {newpath}")

    # TODO: Rather than going directly to the data directory, we should really
    # use the metadata inside the datapackage to find the appropriate file
    # paths pertaining to the CEMS years/states of interest.
    in_types = create_in_dtypes()
    schema = create_cems_schema()
    for year in epacems_years:
        for state in epacems_states:
            newpath = pathlib.Path(
                data_dir,
                f"hourly_emissions_epacems_{year}_{state.lower()}.csv.gz")
            # Read one year/state CSV, coerce dtypes, and tag it with its
            # partition year.
            df = (
                pd.read_csv(
                    newpath, dtype=in_types, parse_dates=["operating_datetime_utc"]
                )
                .assign(year=year)
            )
            if len(df) == 0:
                logger.info(f"Skipping {year}-{state}: 0 records found.")
            else:
                logger.info(f"{year}-{state}: {len(df)} records")
                # Append into the partitioned Parquet dataset under out_dir.
                pq.write_to_dataset(
                    pa.Table.from_pandas(df, preserve_index=False, schema=schema),
                    root_path=str(out_dir),
                    partition_cols=list(partition_cols),
                    compression=compression
                )
def parse_command_line(argv):
    """
    Parse command line arguments. See the -h option.

    Args:
        argv (str): Command line arguments, including caller filename.

    Returns:
        argparse.Namespace: parsed command line arguments and their values.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    # Workspace defaults (pudl_in / pudl_out) come from the user's PUDL setup.
    defaults = pudl.workspace.setup.get_defaults()
    parser.add_argument(
        'datapkg',
        type=str,
        help="""Path to the datapackage.json file describing the datapackage
        that contains the CEMS data to be converted.""",
    )
    parser.add_argument(
        '-z',
        '--compression',
        type=str,
        choices=["gzip", "snappy"],
        help="""Compression algorithm to use for Parquet files. Can be either
        'snappy' (much faster but larger files) or 'gzip' (slower but better
        compression). (default: %(default)s).""",
        default='snappy'
    )
    parser.add_argument(
        '-i',
        '--pudl_in',
        type=str,
        help="""Path to the top level datastore directory. (default:
        %(default)s).""",
        default=defaults["pudl_in"],
    )
    parser.add_argument(
        '-o',
        '--pudl_out',
        type=str,
        help="""Path to the pudl output directory. (default: %(default)s).""",
        default=str(defaults["pudl_out"])
    )
    parser.add_argument(
        '-y',
        '--years',
        nargs='+',
        type=int,
        help="""Which years of EPA CEMS data should be converted to Apache
        Parquet format. Default is all available years, ranging from 1995 to
        the present. Note that data is typically incomplete before ~2000.""",
        default=pc.data_years['epacems']
    )
    parser.add_argument(
        '-s',
        '--states',
        nargs='+',
        type=str.upper,
        help="""Which states EPA CEMS data should be converted to Apache
        Parquet format, as a list of two letter US state abbreviations. Default
        is everything: all 48 continental US states plus Washington DC.""",
        default=pc.cems_states.keys()
    )
    parser.add_argument(
        '-c',
        '--clobber',
        action='store_true',
        help="""Clobber existing parquet files if they exist. If clobber is not
        included but the parquet directory already exists the _build will
        fail.""",
        default=False)
    arguments = parser.parse_args(argv[1:])
    return arguments
def main():
    """Convert zipped EPA CEMS Hourly data to Apache Parquet format."""
    # Display logged output from the PUDL package:
    pudl_logger = logging.getLogger("pudl")
    log_format = '%(asctime)s [%(levelname)8s] %(name)s:%(lineno)s %(message)s'
    coloredlogs.install(fmt=log_format, level='INFO', logger=pudl_logger)

    args = parse_command_line(sys.argv)
    # Resolve the input/output workspace directories from the CLI arguments.
    pudl_settings = pudl.workspace.setup.derive_paths(
        pudl_in=args.pudl_in, pudl_out=args.pudl_out)

    # Output lands under <parquet_dir>/epacems, partitioned by year and state.
    epacems_to_parquet(datapkg_path=pathlib.Path(args.datapkg),
                       epacems_years=args.years,
                       epacems_states=args.states,
                       out_dir=pathlib.Path(
                           pudl_settings['parquet_dir'], "epacems"),
                       compression=args.compression,
                       partition_cols=('year', 'state'),
                       clobber=args.clobber)

if __name__ == '__main__':
    sys.exit(main())
| {
"repo_name": "catalyst-cooperative/pudl",
"path": "src/pudl/convert/epacems_to_parquet.py",
"copies": "1",
"size": "12122",
"license": "mit",
"hash": -7244998253044602000,
"line_mean": 36.4135802469,
"line_max": 83,
"alpha_frac": 0.6236594621,
"autogenerated": false,
"ratio": 3.8300157977883096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9953121900064534,
"avg_score": 0.0001106719647549634,
"num_lines": 324
} |
##A script for creating a table
import numpy as np
## Load necessary modules
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def compare(first, second):
    """cmp-style comparator: order rows by the float in their second-to-last
    column (used with Python 2's sorted(..., cmp=compare))."""
    left = float(first[-2])
    right = float(second[-2])
    if left > right:
        return 1
    if left < right:
        return -1
    return 0
##load data for each cancer, find total genes in oncolnc, get patient info
# Build TCGA_id_to_gene: Entrez-style numeric id -> gene symbol, from one
# representative RSEM results file (rows look like 'SYMBOL|ID\t...').
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','mrna','unc.edu.0cbec58e-f95e-4c60-a85d-210dc56bdf3c.1545137.rsem.genes.normalized_results'))
f.readline()
TCGA_id_to_gene={}
data=[i.split()[0] for i in f]
for i in data:
    TCGA_id_to_gene[i.split('|')[1]]=i.split('|')[0]

# current_id_to_gene: current gene id (column 2) -> symbol (column 5).
f=open(os.path.join(BASE_DIR,'tables','gene_result.txt'))
f.readline()
current_id_to_gene={}
for i in f:
    x=i.split('\t')
    current_id_to_gene[x[2]]=x[5]

# new_ids: old id -> replacement id, skipping unresolved ('None') entries.
new_ids={}
f=open(os.path.join(BASE_DIR,'tables','new_ids_annotated.txt'))
data=[i.strip().split() for i in f]
for i in data:
    if i[2]!='None':
        new_ids[i[0]]=i[2]

all_ids={}
all_genes={}
allowed_ids={}
##this table only records genes present in oncolnc, and only genes which have a clear current id are allowed in oncolnc
for i in TCGA_id_to_gene:
    if i in current_id_to_gene:
        all_ids[i]=''
        all_genes[current_id_to_gene[i]]=''
        allowed_ids[i]=[i,current_id_to_gene[i]]
# Second pass: TCGA ids whose remapped id is not already claimed above.
for i in TCGA_id_to_gene:
    if i in new_ids:
        if new_ids[i] not in all_ids:
            all_ids[new_ids[i]]=''
            all_genes[current_id_to_gene[new_ids[i]]]=''
            allowed_ids[i]=[new_ids[i],current_id_to_gene[new_ids[i]]]
        else:
            pass
    else:
        pass

all_cancers=[]
cancers=['BLCA','BRCA','CESC','COAD','ESCA','GBM','HNSC','KIRC','KIRP','LAML','LGG','LIHC','LUAD','LUSC','OV',\
'PAAD','READ','SARC','SKCM','STAD','UCEC']
for cancer in cancers:
    # Count Cox results whose gene id is allowed into oncolnc.
    # NOTE(review): sorted(..., cmp=...) is Python 2 only; run under python2.
    f=open(os.path.join(BASE_DIR,'mrna','cox',cancer,'coeffs_pvalues_adjusted.txt'))
    cox_results=[i.strip().split() for i in f]
    count=0
    for index,i in enumerate(sorted(cox_results,cmp=compare)):
        if i[0] in allowed_ids:
            count+=1
    f=open(os.path.join(BASE_DIR,'mrna','cox',cancer,'coeffs_pvalues.txt'))
    data=[i for i in f]
    genes_in_oncolnc=count
    # patient_info.txt: header line, then one line of summary statistics.
    f=open(os.path.join(BASE_DIR,'mrna','cox',cancer,'patient_info.txt'))
    f.readline()
    data=f.readline().strip().split()
    all_cancers.append([genes_in_oncolnc]+data)

# Emit one tab-separated row per cancer.
f=open('table_1.txt','w')
for i,j in zip(all_cancers,cancers):
    f.write(j)
    f.write('\t')
    ##write total patients (add males and females)
    f.write(str(int(i[2])+int(i[3])))
    f.write('\t')
    ##write male/female
    f.write(i[2]+'/'+i[3])
    f.write('\t')
    ##write average age at diagnosis
    f.write(i[1])
    f.write('\t')
    ##write events
    f.write(i[4])
    f.write('\t')
    ##write median survival
    f.write(i[5])
    f.write('\t')
    ##write genes in oncolnc
    f.write(str(i[0]))
    f.write('\t')
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "tables/Table1/table_creation.py",
"copies": "1",
"size": "3025",
"license": "mit",
"hash": 416637377168121000,
"line_mean": 24.6355932203,
"line_max": 140,
"alpha_frac": 0.5980165289,
"autogenerated": false,
"ratio": 2.6350174216027873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3733033950502787,
"avg_score": null,
"num_lines": null
} |
##A script for creating a table
## Load necessary modules
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
##load data for each cancer, find total genes in oncolnc, get patient info
# Refactor: the original repeated the same load stanza 20 times (plus a
# special case for GBM); it is collapsed into helpers + one loop. The rows
# appended to all_cancers — and therefore table_2.txt — are identical.

def _patient_info(cancer):
    """Return the summary-statistics line from a cancer's patient_info.txt
    (header line skipped), split into fields."""
    f = open(os.path.join(BASE_DIR, 'mirna', 'cox', cancer, 'patient_info.txt'))
    f.readline()
    return f.readline().strip().split()

def _simple_mirna_count(cancer):
    """miRNAs in oncolnc for most cancers: one per line of coeffs_pvalues.txt."""
    f = open(os.path.join(BASE_DIR, 'mirna', 'cox', cancer, 'coeffs_pvalues.txt'))
    return len([i for i in f])

def _gbm_mirna_count():
    """miRNA count for GBM.

    ##not all GBM data is included in oncolnc, need to count genes in oncolnc
    Only miRNAs with exactly one alias record whose transcript appears in
    mature.fa are counted.
    """
    f = open(os.path.join(BASE_DIR, 'mirna', 'mature.fa'))
    transcript_to_names = {i.split()[1]: i.split()[0].strip('>') for i in f if '>' in i}
    f = open(os.path.join(BASE_DIR, 'mirna', 'cox', 'GBM', 'coeffs_pvalues.txt'))
    data = [i.split() for i in f]
    f2 = open(os.path.join(BASE_DIR, 'mirna', 'aliases.txt'))
    aliases = {}
    for i in f2:
        aliases[i.strip().split()[1]] = i.split()[0]
    mirnas = [i[0] for i in data]
    # Map each GBM miRNA name to every alias record mentioning it.
    all_aliases = {}
    for name in mirnas:
        for alias in aliases:
            if name in alias.split(';'):
                all_aliases[name] = all_aliases.get(name, []) + [[alias, aliases[alias]]]
    count = 0
    for row in data:
        # NOTE(review): KeyError if a miRNA matched no alias record at all;
        # the original script behaved the same way.
        if len(all_aliases[row[0]]) == 1:
            if all_aliases[row[0]][0][1] in transcript_to_names:
                count += 1
    return count

names = ['BLCA', 'BRCA', 'CESC', 'COAD', 'ESCA', 'GBM', 'HNSC', 'KIRC', 'KIRP',
         'LAML', 'LGG', 'LIHC', 'LUAD', 'LUSC', 'OV', 'PAAD', 'READ', 'SARC',
         'SKCM', 'STAD', 'UCEC']

# One row per cancer:
# [miRNAs in oncolnc, avg age, males, females, events, median survival]
all_cancers = []
for cancer in names:
    if cancer == 'GBM':
        genes_in_oncolnc = _gbm_mirna_count()
    else:
        genes_in_oncolnc = _simple_mirna_count(cancer)
    all_cancers.append([genes_in_oncolnc] + _patient_info(cancer))

f = open('table_2.txt', 'w')
for i, j in zip(all_cancers, names):
    f.write(j)
    f.write('\t')
    ##write total patients (add males and females)
    f.write(str(int(i[2]) + int(i[3])))
    f.write('\t')
    ##write male/female
    f.write(i[2] + '/' + i[3])
    f.write('\t')
    ##write average age at diagnosis
    f.write(i[1])
    f.write('\t')
    ##write events
    f.write(i[4])
    f.write('\t')
    ##write median survival
    f.write(i[5])
    f.write('\t')
    ##write genes in oncolnc
    f.write(str(i[0]))
    f.write('\t')
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "tables/Table2/table_creation.py",
"copies": "1",
"size": "7470",
"license": "mit",
"hash": 9132579757509238000,
"line_mean": 29.2429149798,
"line_max": 116,
"alpha_frac": 0.6676037483,
"autogenerated": false,
"ratio": 2.382015306122449,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35496190544224493,
"avg_score": null,
"num_lines": null
} |
##A script for creating a table
## Load necessary modules
import os

##This path climbing assumes the script lives three directories below the repo root
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

def _load_cancer(cancer):
    """Load the Cox-regression summary row for one cancer.

    Returns [genes analyzed, significant genes, avg age, males,
    females, deaths, median survival]; the last five elements are
    strings read straight from patient_info.txt.
    """
    cancer_dir = os.path.join(BASE_DIR, 'cox_regression', cancer)
    with open(os.path.join(cancer_dir, 'coeffs_normalized_pvalues_adjusted.txt')) as results:
        rows = [line.strip().split() for line in results]
    ids, coeffs, normalized, pvalues, adjusted = zip(*rows)
    genes_analyzed = len(ids)
    ## significant genes: adjusted p-value at or below .05
    ## (no need to sort just to count)
    sig = len([p for p in map(float, adjusted) if p <= .05])
    with open(os.path.join(cancer_dir, 'patient_info.txt')) as info:
        info.readline()  # skip the header line
        patient_data = info.readline().strip().split()
    return [genes_analyzed, sig] + patient_data

##load data for each cancer, find total genes analyzed and significant genes, get patient info
names = ['BLCA', 'BRCA', 'CESC', 'COAD', 'GBM', 'HNSC', 'KIRC', 'KIRP',
         'LAML', 'LGG', 'LIHC', 'LUAD', 'LUSC', 'SKCM', 'OV', 'STAD']
all_cancers = [_load_cancer(name) for name in names]
## write one tab-separated row per cancer
with open('table_1.txt', 'w') as out:
    for row, name in zip(all_cancers, names):
        fields = [
            name,
            str(int(row[3]) + int(row[4])),  # total patients = males + females
            row[6],                          # median survival
            row[5],                          # events (deaths)
            row[2],                          # average age at diagnosis
            row[3] + '/' + row[4],           # male/female counts
            str(row[0]),                     # genes analyzed
            str(row[1]),                     # number of significant genes
        ]
        out.write('\t'.join(fields) + '\n')
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/tables/table 1/table_creation.py",
"copies": "1",
"size": "7812",
"license": "mit",
"hash": -2019849272320904700,
"line_mean": 33.4140969163,
"line_max": 115,
"alpha_frac": 0.7050691244,
"autogenerated": false,
"ratio": 2.6808510638297873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3885920188229787,
"avg_score": null,
"num_lines": null
} |
##A script for creating tables for each cancer, with the data sorted
def compare(first,second):
    ## cmp-style comparator used with Python 2's sorted(cmp=...):
    ## orders rows by the float in the second-to-last column
    ## (the raw Cox p-value), ascending.
    left = float(first[-2])
    right = float(second[-2])
    if left > right:
        return 1
    if left < right:
        return -1
    return 0
## Load necessary modules
import os

##This path climbing assumes the script lives three directories below the repo root
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

##need to get the gene ids from a RNA-SEQV2 file, any file will work
id_to_gene = {}
with open(os.path.join(BASE_DIR, 'tcga_data', 'GBM', 'mrna',
                       'unc.edu.0cbec58e-f95e-4c60-a85d-210dc56bdf3c.1545137.rsem.genes.normalized_results')) as gene_file:
    gene_file.readline()  # skip the header line
    for line in gene_file:
        ## first column is 'SYMBOL|id'; map id -> symbol
        parts = line.split()[0].split('|')
        id_to_gene[parts[1]] = parts[0]

def _load_rows(cancer):
    """Return one row per gene for *cancer*:
    (gene id, symbol, coeff, normalized coeff, p-value, adjusted p-value).
    """
    path = os.path.join(BASE_DIR, 'cox_regression', cancer,
                        'coeffs_normalized_pvalues_adjusted.txt')
    with open(path) as results:
        data = [line.strip().split() for line in results]
    ids, coeffs, normalized, pvalues, adjusted = zip(*data)
    return zip(ids, [id_to_gene[i] for i in ids], coeffs, normalized, pvalues, adjusted)

names = ['BLCA', 'LGG', 'BRCA', 'CESC', 'COAD', 'GBM', 'HNSC', 'KIRC',
         'KIRP', 'LAML', 'LIHC', 'LUAD', 'LUSC', 'SKCM', 'OV', 'STAD']
for name in names:
    ## sort rows ascending by the raw Cox p-value (second-to-last column);
    ## key=... gives the same stable ordering as the old cmp-style
    ## comparator and works on both Python 2 and 3
    with open(name + '.txt', 'w') as out:
        for row in sorted(_load_rows(name), key=lambda r: float(r[-2])):
            ## each row deliberately ends with a trailing tab before the
            ## newline to match the original output format
            out.write('\t'.join(row) + '\t\n')
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/tables/S1/table_creation.py",
"copies": "1",
"size": "5368",
"license": "mit",
"hash": -3551850941878003000,
"line_mean": 40.2923076923,
"line_max": 140,
"alpha_frac": 0.7192622951,
"autogenerated": false,
"ratio": 2.7015601409159538,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8681091297029659,
"avg_score": 0.047946227797258774,
"num_lines": 130
} |
## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## NOTE(review): file handles in this script are opened and then rebound
## without close(); the interpreter reclaims them at exit.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lusc.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## skip the two secondary header rows of the TCGA clinical file
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        ## the regex accepts only all-digit strings, i.e. a usable day
        ## count; a death date takes precedence over last contact
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_patient_lusc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## the bare except silently drops rows with a missing/non-numeric
    ## age or an unrecognized sex value (deliberate best-effort filter)
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        ## keep whichever record has the longer follow-up time
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## rebuild the patient barcode 'TCGA-XX-XXXX' from the first
            ## three dash-separated fields of the sample barcode
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key marks this as Python 2 code
    if TCGA_to_mrna.has_key(i[0]):
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
## NOTE(review): parsing survfit's printed table by position is fragile
## across survival-package versions -- confirm the column layout if upgrading
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('Average Age')
f.write('\t')
f.write('Males')
f.write('\t')
f.write('Females')
f.write('\t')
f.write('Deaths')
f.write('\t')
f.write('Median Survival')
f.write('\n')
f.write(str(age))
f.write('\t')
f.write(str(males))
f.write('\t')
f.write(str(females))
f.write('\t')
f.write(deaths)
f.write('\t')
f.write(median)
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/LUSC/patient_info.py",
"copies": "1",
"size": "6888",
"license": "mit",
"hash": -610288040859616800,
"line_mean": 30.1674208145,
"line_max": 132,
"alpha_frac": 0.6681184669,
"autogenerated": false,
"ratio": 2.95242177453922,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.412054024143922,
"avg_score": null,
"num_lines": null
} |
## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## NOTE(review): file handles in this script are opened and then rebound
## without close(); the interpreter reclaims them at exit.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lusc.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## skip the two secondary header rows of the TCGA clinical file
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        ## the regex accepts only all-digit strings, i.e. a usable day
        ## count; a death date takes precedence over last contact
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_patient_lusc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## the bare except silently drops rows with a missing/non-numeric
    ## age or an unrecognized sex value (deliberate best-effort filter)
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        ## keep whichever record has the longer follow-up time
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## rebuild the patient barcode 'TCGA-XX-XXXX' from the first
            ## three dash-separated fields of the sample barcode
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key marks this as Python 2 code
    if TCGA_to_mirna.has_key(i[0]):
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
## NOTE(review): parsing survfit's printed table by position is fragile
## across survival-package versions -- confirm the column layout if upgrading
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('Average Age')
f.write('\t')
f.write('Males')
f.write('\t')
f.write('Females')
f.write('\t')
f.write('Deaths')
f.write('\t')
f.write('Median Survival')
f.write('\n')
f.write(str(age))
f.write('\t')
f.write(str(males))
f.write('\t')
f.write(str(females))
f.write('\t')
f.write(deaths)
f.write('\t')
f.write(median)
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/LUSC/patient_info.py",
"copies": "1",
"size": "6885",
"license": "mit",
"hash": 3045482645298794000,
"line_mean": 30.5825688073,
"line_max": 132,
"alpha_frac": 0.6687000726,
"autogenerated": false,
"ratio": 2.9435656263360412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9013037485555393,
"avg_score": 0.019845642676129416,
"num_lines": 218
} |
## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lusc.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical=[['','','']]
for i in data:
try:
if clinical[-1][0]==i[0]:
if i[8]=='Alive':
clinical[-1]=[i[0],int(i[9]),'Alive']
elif i[8]=='Dead':
clinical[-1]=[i[0],int(i[10]),'Dead']
else:
pass
else:
if i[8]=='Alive':
clinical.append([i[0],int(i[9]),'Alive'])
elif i[8]=='Dead':
clinical.append([i[0],int(i[10]),'Dead'])
else:
pass
except:
pass
## Removing the empty value.
clinical=clinical[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_patient_lusc.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[0]]=[0,sex_dict[i[6]],int(i[-14])]
if i[42]=='Alive':
clinical4.append([i[0],int(i[52]),'Alive'])
elif i[42]=='Dead':
clinical4.append([i[0],int(i[53]),'Dead'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
## Cohort summary statistics.
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
## NOTE(review): the printed survfit summary is parsed positionally from its
## last line; this is fragile against changes in the survival package's output format.
#the number of events(deaths) is the fourth column of the output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
## Write the cohort summary to a tab-delimited file.
## A with-statement guarantees the file is flushed and closed -- the original
## never called close(), so the final buffered writes could be lost.
header=['Average Age','Males','Females','Deaths','Median Survival']
values=[str(age),str(males),str(females),deaths,median]
with open('patient_info.txt','w') as f:
    f.write('\t'.join(header))
    f.write('\n')
    f.write('\t'.join(values))
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/LUSC/patient_info.py",
"copies": "1",
"size": "6093",
"license": "mit",
"hash": -1661354683965662200,
"line_mean": 28.4347826087,
"line_max": 132,
"alpha_frac": 0.6446742163,
"autogenerated": false,
"ratio": 2.9491771539206195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.409385137022062,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every BLCA lncRNA in the beta MiTranscriptome data set (normalized counts)
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')  ## load the R survival package once, up front
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## BASE_DIR is the repository root: four directory levels above this script.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Read the follow up data
## It was found that the v4.0 file contained more recent follow up data than v2.0, but the files contained nonredundant patients.
## So both files are loaded with the v4.0 getting preference.
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_blca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical1=[['','','']]
for i in data:
    ## Same patient as the previous row: overwrite the last entry with the newer data.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    ## New patient: append (rows lacking a numeric day count are skipped).
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_blca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
    ## v2.0 entries are only used for patients absent from the v4.0 data.
    if i[patient_column] not in [j[0] for j in clinical]:
        if clinical2[-1][0]==i[patient_column]:
            if re.search('^[0-9]+$',i[alive_column]):
                clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
            elif re.search('^[0-9]+$',i[death_column]):
                clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
            else:
                pass
        else:
            if re.search('^[0-9]+$',i[alive_column]):
                clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
            elif re.search('^[0-9]+$',i[death_column]):
                clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
            else:
                pass
## Removing the empty value and combining the lists.
clinical+=clinical2[1:]
## Grade, sex and age information were taken from the "clinical_patient" file.
## Dictionaries map the categorical grade and sex strings to integer codes.
more_clinical={}
grade_dict={}
grade_dict['High Grade']=1
grade_dict['Low Grade']=0
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_patient_blca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('neoplasm_histologic_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## Patients with unusable grade/sex/age values are skipped on purpose.
    ## Catch only the exceptions these lookups/conversions can raise instead of
    ## the original bare except, which would also hide genuine programming errors.
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except (KeyError,ValueError,IndexError):
        pass
new_clinical=[]
####It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
####All the clinical data is merged checking which data is the most up to date
## The patient-ID list is built once instead of being recomputed inside the
## loop (the original rebuilt it up to three times per patient -- quadratic).
## `clinical` is not modified in this loop, so hoisting is behavior-identical.
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        ## Keep whichever entry has the longer follow-up time.
        if i[1]<=clinical[clinical_ids.index(i[0])][1]:
            new_clinical.append(clinical[clinical_ids.index(i[0])])
        else:
            new_clinical.append(i)
####also do the reverse since clinical can contain patients not included in clinical4
## Membership is tracked with a set updated as entries are appended, preserving
## the original rescan-per-iteration semantics without the rescans.
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
#### only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
#### A new list containing both follow up times and grade, sex, and age is constructed.
#### Only patients with grade, sex, and age information are included.
#### Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the BLCA patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','lncrna','BLCA.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## One expression list per patient column.  The original [[]]*len(patients)
## created N references to a single shared list, which only worked because each
## slot was rebound with `+` (quadratic).  Independent lists permit a plain
## O(1) append with identical results.
lncrnas=[[] for _ in patients]
for i,j in zip(transcripts,f):
    ## Skip transcripts already seen (duplicates exist in the data).
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index].append([i,float(k)])
        lncrna_dict[i]=''
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)
## Expression data reordered to match clinical_and_files; patients with several
## sequencing files get the element-wise mean across their files.
ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## NOTE(review): zip() must yield a list here because the result is later
        ## indexed (j[i] in the Cox loop) -- true on Python 2; Python 3 would
        ## need list(zip(...)).
        ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want lncrnas that meet an expression cutoff.
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen.
## Independent per-patient lists ([[]]*N would alias a single shared list) allow
## an O(1) append instead of the original quadratic list re-concatenation.
final_lncrnas=[[] for _ in ordered_lncrnas]
for i in range(len(ordered_lncrnas[0])):
    ## Collect the i-th transcript's [name, value] pair for every patient.
    temp=[]
    for j in ordered_lncrnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(ordered_lncrnas)/4.0 and median>.1:
        for index, kk in enumerate(temp):
            final_lncrnas[index].append(kk)
## This will write the final lncrnas to a medium sized file ~10-50MB which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'lncrna','cox','BLCA','final_lncrnas.txt'),'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ##Low Grade dummy variable (kaplan entry index 2 holds the grade code)
    lowgrade=[]
    for ii in kaplan:
        if ii[2]==0:
            lowgrade.append(1)
        else:
            lowgrade.append(0)
    ##High Grade dummy variable
    highgrade=[]
    for ii in kaplan:
        if ii[2]==1:
            highgrade.append(1)
        else:
            highgrade.append(0)
    ro.globalenv['lowgrade']=ro.IntVector(lowgrade)
    ro.globalenv['highgrade']=ro.IntVector(highgrade)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + lowgrade + highgrade + sex + age)') ## Perform Cox regression
    ## Reset both values every iteration: the original carried them over from the
    ## previous transcript, so a failed parse silently recorded stale numbers
    ## (or raised NameError on the very first transcript).
    coeff='NA'
    pvalue='NA'
    ## Parse the printed result with python for the lncrna coefficient and pvalue
    for entry in str(res).split('\n'):
        ## entry.split()[0] raises IndexError on blank lines; only that is suppressed.
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','BLCA','coeffs_pvalues.txt'),'w')
for i,j,k in zip(lncrnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/BLCA/cox_regression.py",
"copies": "1",
"size": "11632",
"license": "mit",
"hash": -906815388999954600,
"line_mean": 35.0123839009,
"line_max": 142,
"alpha_frac": 0.6583562586,
"autogenerated": false,
"ratio": 3.145484045429962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9185043444019985,
"avg_score": 0.023759372001995425,
"num_lines": 323
} |
## A script for finding every cox coefficient and pvalue for every BRCA lncRNA in the beta MiTranscriptome data set (normalized counts)
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')  ## load the R survival package once, up front
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## BASE_DIR is the repository root: four directory levels above this script.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were three clinical files with nonredundant data. V4.0 is in general the most uptodate, but it is possible
## for data in the other files to be more uptodate. As a result, clinical data will be merged.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## Same patient as the previous row: overwrite the last entry with the newer data.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    ## New patient: append (rows lacking a numeric day count are skipped).
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical1=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.1_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## Same last-row-wins parsing as for the v4.0 file.
clinical2=[['','','']]
for i in data:
    if clinical2[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical2=clinical2[1:]
##merging the data
## For patients present in both files, the entry with the longer follow-up wins.
## NOTE(review): the ID list is rebuilt each iteration -- O(n^2); works, but slow for large cohorts.
new_clinical=[]
for i in clinical2:
    if i[0] not in [j[0] for j in clinical1]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical1[[j[0] for j in clinical1].index(i[0])][1]:
            new_clinical.append(clinical1[[j[0] for j in clinical1].index(i[0])])
        else:
            new_clinical.append(i)
for i in clinical1:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v1.5_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## Same last-row-wins parsing as for the v4.0 and v2.1 files.
clinical3=[['','','']]
for i in data:
    if clinical3[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical3=clinical3[1:]
##merging the data
## Longer follow-up wins, as in the previous merge.
newer_clinical=[]
for i in clinical3:
    if i[0] not in [j[0] for j in new_clinical]:
        newer_clinical.append(i)
    else:
        if i[1]<=new_clinical[[j[0] for j in new_clinical].index(i[0])][1]:
            newer_clinical.append(new_clinical[[j[0] for j in new_clinical].index(i[0])])
        else:
            newer_clinical.append(i)
for i in new_clinical:
    if i[0] not in [j[0] for j in newer_clinical]:
        newer_clinical.append(i)
## Grade, sex, and age information were taken from the "clinical_patient" file.
## Dictionaries map the categorical histology and sex strings to integer codes.
more_clinical={}
grade_dict={}
grade_dict['Infiltrating Ductal Carcinoma']=1
grade_dict['Metaplastic Carcinoma']=3
grade_dict['Mucinous Carcinoma']=4
grade_dict['Medullary Carcinoma']=5
grade_dict['Infiltrating Lobular Carcinoma']=6
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_patient_brca.txt'))
columns=f.readline().split('\t')
grade_column=columns.index('histological_type')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    ## Patients with unusable histology/sex/age values are skipped on purpose.
    ## Catch only the exceptions these lookups/conversions can raise instead of
    ## the original bare except, which would also hide genuine programming errors.
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except (KeyError,ValueError,IndexError):
        pass
newest_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in newer_clinical]:
        newest_clinical.append(i)
    else:
        ## Keep whichever entry has the longer follow-up time.
        ## NOTE(review): the ID list is rebuilt each iteration -- O(n^2); works, but slow for large cohorts.
        if i[1]<=newer_clinical[[j[0] for j in newer_clinical].index(i[0])][1]:
            newest_clinical.append(newer_clinical[[j[0] for j in newer_clinical].index(i[0])])
        else:
            newest_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in newer_clinical:
    if i[0] not in [j[0] for j in newest_clinical]:
        newest_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in newest_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the BRCA patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','lncrna','BRCA.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## One expression list per patient column.  The original [[]]*len(patients)
## created N references to a single shared list, which only worked because each
## slot was rebound with `+` (quadratic).  Independent lists permit a plain
## O(1) append with identical results.
lncrnas=[[] for _ in patients]
for i,j in zip(transcripts,f):
    ## Skip transcripts already seen (duplicates exist in the data).
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index].append([i,float(k)])
        lncrna_dict[i]=''
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)
## Expression data reordered to match clinical_and_files; patients with several
## sequencing files get the element-wise mean across their files.
ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## NOTE(review): zip() must yield a list here because the result is later
        ## indexed (j[i] in the Cox loop) -- true on Python 2; Python 3 would
        ## need list(zip(...)).
        ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want lncrnas that meet an expression cutoff.
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen.
## Independent per-patient lists ([[]]*N would alias a single shared list) allow
## an O(1) append instead of the original quadratic list re-concatenation.
final_lncrnas=[[] for _ in ordered_lncrnas]
for i in range(len(ordered_lncrnas[0])):
    ## Collect the i-th transcript's [name, value] pair for every patient.
    temp=[]
    for j in ordered_lncrnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(ordered_lncrnas)/4.0 and median>.1:
        for index, kk in enumerate(temp):
            final_lncrnas[index].append(kk)
## This will write the final lncrnas to a medium sized file ~10-50MB which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'lncrna','cox','BRCA','final_lncrnas.txt'),'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ##Dummy variables for the five histological types (kaplan entry index 2 holds the code).
    ##ductal
    ductal=[]
    for ii in kaplan:
        if ii[2]==1:
            ductal.append(1)
        else:
            ductal.append(0)
    ##metaplastic
    metaplastic=[]
    for ii in kaplan:
        if ii[2]==3:
            metaplastic.append(1)
        else:
            metaplastic.append(0)
    ##mucinous
    mucinous=[]
    for ii in kaplan:
        if ii[2]==4:
            mucinous.append(1)
        else:
            mucinous.append(0)
    ##medullary
    medullary=[]
    for ii in kaplan:
        if ii[2]==5:
            medullary.append(1)
        else:
            medullary.append(0)
    ##lobular
    lobular=[]
    for ii in kaplan:
        if ii[2]==6:
            lobular.append(1)
        else:
            lobular.append(0)
    ro.globalenv['ductal']=ro.IntVector(ductal)
    ro.globalenv['metaplastic']=ro.IntVector(metaplastic)
    ro.globalenv['mucinous']=ro.IntVector(mucinous)
    ro.globalenv['medullary']=ro.IntVector(medullary)
    ro.globalenv['lobular']=ro.IntVector(lobular)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + ductal + metaplastic + mucinous + medullary + lobular + sex + age)') ## Perform Cox regression
    ## Reset both values every iteration: the original carried them over from the
    ## previous transcript, so a failed parse silently recorded stale numbers
    ## (or raised NameError on the very first transcript).
    coeff='NA'
    pvalue='NA'
    ## Parse the printed result with python for the lncrna coefficient and pvalue
    for entry in str(res).split('\n'):
        ## entry.split()[0] raises IndexError on blank lines; only that is suppressed.
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','BRCA','coeffs_pvalues.txt'),'w')
for i,j,k in zip(lncrnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/BRCA/cox_regression.py",
"copies": "1",
"size": "14304",
"license": "mit",
"hash": 7387531978692712000,
"line_mean": 33.0571428571,
"line_max": 142,
"alpha_frac": 0.6505173378,
"autogenerated": false,
"ratio": 3.032435870256519,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4182953208056519,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every CESC lncRNA in the beta MiTranscriptome data set (normalized counts)
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')  ## load the R survival package once, up front
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## BASE_DIR is the repository root: four directory levels above this script.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were two clinical files with nonredundant data. V4.0 was found to be most up to date.
## Both files are loaded with the more up to date file getting preference
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_cesc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## Same patient as the previous row: overwrite the last entry with the newer data.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    ## New patient: append (rows lacking a numeric day count are skipped).
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_cesc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
    ## v2.0 entries are only used for patients absent from the v4.0 data.
    if i[patient_column] not in [j[0] for j in clinical]:
        if clinical2[-1][0]==i[patient_column]:
            if re.search('^[0-9]+$',i[alive_column]):
                clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
            elif re.search('^[0-9]+$',i[death_column]):
                clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
            else:
                pass
        else:
            if re.search('^[0-9]+$',i[alive_column]):
                clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
            elif re.search('^[0-9]+$',i[death_column]):
                clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
            else:
                pass
##merging data and removing the empty value
clinical+=clinical2[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file.
## Dictionaries map the categorical grade and sex strings to integer codes.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
clinical4=[]
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_patient_cesc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
for i in data:
    ## Patients with unusable grade/sex/age values are skipped on purpose.
    ## Catch only the exceptions these lookups/conversions can raise instead of
    ## the original bare except, which would also hide genuine programming errors.
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except (KeyError,ValueError,IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        ## Keep whichever entry has the longer follow-up time.
        ## NOTE(review): the ID list is rebuilt each iteration -- O(n^2); works, but slow for large cohorts.
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the CESC patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','lncrna','CESC.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## One expression list per patient column.  The original [[]]*len(patients)
## created N references to a single shared list, which only worked because each
## slot was rebound with `+` (quadratic).  Independent lists permit a plain
## O(1) append with identical results.
lncrnas=[[] for _ in patients]
for i,j in zip(transcripts,f):
    ## Skip transcripts already seen (duplicates exist in the data).
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index].append([i,float(k)])
        lncrna_dict[i]=''
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)
## Expression data reordered to match clinical_and_files; patients with several
## sequencing files get the element-wise mean across their files.
ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## NOTE(review): zip() must yield a list here because the result is later
        ## indexed -- true on Python 2; Python 3 would need list(zip(...)).
        ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want lncras that meet an expression cutoff
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen
final_lncrnas=[[]]*len(ordered_lncrnas)
for i in range(len(ordered_lncrnas[0])):
temp=[]
for j in ordered_lncrnas:
temp.append(j[i])
count=0
for k in temp:
if k[1]==0:
count+=1
median=np.median([ii[1] for ii in temp])
if count<len(ordered_lncrnas)/4.0 and median>.1:
for index, kk in enumerate(temp):
final_lncrnas[index]=final_lncrnas[index]+[kk]
## This will write the final lncrnas to a medium sized file ~10-50MB which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'lncrna','cox','CESC','final_lncrnas.txt'),'w')
for i in final_lncrnas:
f.write(str(i))
f.write('\n')
f.close()
##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
kaplan=[]
lncrnas.append(final_lncrnas[0][i][0])
for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
ro.globalenv['expression']=ro.FloatVector(data)
res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
inverse_norm=list(res) ## Convert robject to python list
## Prepare the variables for rpy2
ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
##grade1
grade1=[]
for ii in kaplan:
if ii[2]==1:
grade1.append(1)
else:
grade1.append(0)
##grade2
grade2=[]
for ii in kaplan:
if ii[2]==2:
grade2.append(1)
else:
grade2.append(0)
##grade3
grade3=[]
for ii in kaplan:
if ii[2]==3:
grade3.append(1)
else:
grade3.append(0)
##grade4
grade4=[]
for ii in kaplan:
if ii[2]==4:
grade4.append(1)
else:
grade4.append(0)
ro.globalenv['grade1']=ro.IntVector(grade1)
ro.globalenv['grade2']=ro.IntVector(grade2)
ro.globalenv['grade3']=ro.IntVector(grade3)
ro.globalenv['grade4']=ro.IntVector(grade4)
ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
res=ro.r('coxph(Surv(times,died) ~ lncrna + grade1 + grade2 + grade3 + grade4 + age)') ## Perform Cox regression
# Parse the string of the result with python for the lncrna coefficient and pvalue
for entry in str(res).split('\n'):
try:
if entry.split()[0]=='lncrna':
coeff=entry.split()[1]
pvalue=entry.split()[-1]
break
except:
pass
coeffs.append(coeff)
pvalues.append(pvalue)
## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','CESC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(lncrnas,coeffs,pvalues):
f.write(i)
f.write('\t')
f.write(j)
f.write('\t')
f.write(k)
f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/CESC/cox_regression.py",
"copies": "1",
"size": "11896",
"license": "mit",
"hash": -4674484514787050000,
"line_mean": 34.1952662722,
"line_max": 142,
"alpha_frac": 0.6519838601,
"autogenerated": false,
"ratio": 3.1214904224612963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4273474282561296,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every COAD lncRNA in the beta MiTranscriptome data set (normalized counts)

## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re

##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

## Parse survival times and vital status from the follow up file.
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_coad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
f.close()

## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass

## Removing the empty value.
clinical=clinical1[1:]

## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1

## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','clinical','nationwidechildrens.org_clinical_patient_coad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
f.close()
for i in data:
    ## The leading 0 is a placeholder where other cancer scripts carry tumor grade.
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except (KeyError, ValueError):
        ## Narrowed from a bare except: only a missing/invalid sex or age should skip a patient.
        pass

new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)

##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]

final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])

##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the COAD patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
## NOTE(review): eval() assumes these locally generated data files are trusted input.
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','lncrna','COAD.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## One expression list per patient column. A comprehension is used instead of
## [[]]*len(patients) so the sublists are distinct objects rather than aliases.
lncrnas=[[] for _ in patients]
for i,j in zip(transcripts,f):
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index]=lncrnas[index]+[[i,float(k)]]
        lncrna_dict[i]=''

##create a dictionary mapping patient to all of their lncrna expression data
patient_dict={}
for index, i in enumerate(patients):
    patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]

##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)

ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        ## A patient with multiple sequencing files gets the mean expression of each transcript.
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## list(zip(...)) so the result stays indexable under Python 3 as well as Python 2.
        ordered_lncrnas.append(list(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp))))))

## Only want lncrnas that meet an expression cutoff
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen
final_lncrnas=[[] for _ in ordered_lncrnas]
for i in range(len(ordered_lncrnas[0])):
    temp=[]
    for j in ordered_lncrnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(ordered_lncrnas)/4.0 and median>.1:
        for index, kk in enumerate(temp):
            final_lncrnas[index]=final_lncrnas[index]+[kk]

## This will write the final lncrnas to a medium sized file ~10-50MB which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'lncrna','cox','COAD','final_lncrnas.txt'),'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()

##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + sex + age)') ## Perform Cox regression
    ## Parse the printed model for the lncrna coefficient and pvalue.
    ## coeff/pvalue are reset every iteration so a failed parse records 'NA'
    ## instead of silently reusing the previous lncrna's values.
    coeff='NA'
    pvalue='NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError:
            ## Blank lines in the printed model have no fields to split.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','COAD','coeffs_pvalues.txt'),'w')
for i,j,k in zip(lncrnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/COAD/cox_regression.py",
"copies": "1",
"size": "9471",
"license": "mit",
"hash": 3362725805607594000,
"line_mean": 35.8521400778,
"line_max": 142,
"alpha_frac": 0.6732129659,
"autogenerated": false,
"ratio": 3.119565217391304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4292778183291304,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every GBM lncRNA in the beta MiTranscriptome data set (normalized counts)

## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re

##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

## Parse survival times and vital status from the follow up file.
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_gbm.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
f.close()

## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass

## Removing the empty value.
clinical=clinical1[1:]

## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1

## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','clinical','nationwidechildrens.org_clinical_patient_gbm.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
f.close()
for i in data:
    ## The leading 0 is a placeholder where other cancer scripts carry tumor grade.
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except (KeyError, ValueError):
        ## Narrowed from a bare except: only a missing/invalid sex or age should skip a patient.
        pass

new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)

##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]

final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])

##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the GBM patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
## NOTE(review): eval() assumes these locally generated data files are trusted input.
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','lncrna','GBM.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## One expression list per patient column. A comprehension is used instead of
## [[]]*len(patients) so the sublists are distinct objects rather than aliases.
lncrnas=[[] for _ in patients]
for i,j in zip(transcripts,f):
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index]=lncrnas[index]+[[i,float(k)]]
        lncrna_dict[i]=''

##create a dictionary mapping patient to all of their lncrna expression data
patient_dict={}
for index, i in enumerate(patients):
    patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]

##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)

ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        ## A patient with multiple sequencing files gets the mean expression of each transcript.
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## list(zip(...)) so the result stays indexable under Python 3 as well as Python 2.
        ordered_lncrnas.append(list(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp))))))

## Only want lncrnas that meet an expression cutoff
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen
final_lncrnas=[[] for _ in ordered_lncrnas]
for i in range(len(ordered_lncrnas[0])):
    temp=[]
    for j in ordered_lncrnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(ordered_lncrnas)/4.0 and median>.1:
        for index, kk in enumerate(temp):
            final_lncrnas[index]=final_lncrnas[index]+[kk]

## This will write the final lncrnas to a medium sized file ~10-50MB which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'lncrna','cox','GBM','final_lncrnas.txt'),'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()

##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + sex + age)') ## Perform Cox regression
    ## Parse the printed model for the lncrna coefficient and pvalue.
    ## coeff/pvalue are reset every iteration so a failed parse records 'NA'
    ## instead of silently reusing the previous lncrna's values.
    coeff='NA'
    pvalue='NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError:
            ## Blank lines in the printed model have no fields to split.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','GBM','coeffs_pvalues.txt'),'w')
for i,j,k in zip(lncrnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/GBM/cox_regression.py",
"copies": "1",
"size": "9487",
"license": "mit",
"hash": 7810425097567212000,
"line_mean": 35.6293436293,
"line_max": 142,
"alpha_frac": 0.6731316538,
"autogenerated": false,
"ratio": 3.1217505758473183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9178877751794623,
"avg_score": 0.023200895570539174,
"num_lines": 259
} |
## A script for finding every cox coefficient and pvalue for every HNSC lncRNA in the beta MiTranscriptome data set (normalized counts)

## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re

##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

## There were two clinical files with nonredundant data. V4.8 was found to be most up to date.
## Both files are loaded with the more up to date file getting preference.
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','clinical','nationwidechildrens.org_clinical_follow_up_v4.8_hnsc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
f.close()

## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass

## Removing the empty value.
clinical=clinical1[1:]

## The v1.0 follow up file is only consulted for patients absent from the v4.8 file.
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_hnsc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
f.close()
clinical2=[['','','']]
for i in data:
    if i[patient_column] not in [j[0] for j in clinical]:
        if clinical2[-1][0]==i[patient_column]:
            if re.search('^[0-9]+$',i[death_column]):
                clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
            elif re.search('^[0-9]+$',i[alive_column]):
                clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
            else:
                pass
        else:
            if re.search('^[0-9]+$',i[death_column]):
                clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
            elif re.search('^[0-9]+$',i[alive_column]):
                clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
            else:
                pass

##merging data and removing the empty value
clinical+=clinical2[1:]

## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1

## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','clinical','nationwidechildrens.org_clinical_patient_hnsc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
f.close()
clinical4=[]
for i in data:
    ## Patients missing a recognized grade, sex, or numeric age are skipped.
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except (KeyError, ValueError):
        ## Narrowed from a bare except: only missing/invalid grade, sex, or age should skip a patient.
        pass

new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)

##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]

final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])

##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the HNSC patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
## NOTE(review): eval() assumes these locally generated data files are trusted input.
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','lncrna','HNSC.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## One expression list per patient column. A comprehension is used instead of
## [[]]*len(patients) so the sublists are distinct objects rather than aliases.
lncrnas=[[] for _ in patients]
for i,j in zip(transcripts,f):
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index]=lncrnas[index]+[[i,float(k)]]
        lncrna_dict[i]=''

##create a dictionary mapping patient to all of their lncrna expression data
patient_dict={}
for index, i in enumerate(patients):
    patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]

##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)

ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        ## A patient with multiple sequencing files gets the mean expression of each transcript.
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## list(zip(...)) so the result stays indexable under Python 3 as well as Python 2.
        ordered_lncrnas.append(list(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp))))))

## Only want lncrnas that meet an expression cutoff
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen
final_lncrnas=[[] for _ in ordered_lncrnas]
for i in range(len(ordered_lncrnas[0])):
    temp=[]
    for j in ordered_lncrnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(ordered_lncrnas)/4.0 and median>.1:
        for index, kk in enumerate(temp):
            final_lncrnas[index]=final_lncrnas[index]+[kk]

## This will write the final lncrnas to a medium sized file ~10-50MB which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'lncrna','cox','HNSC','final_lncrnas.txt'),'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()

##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Indicator (dummy) variables for tumor grade, one per grade level.
    grade1=[1 if ii[2]==1 else 0 for ii in kaplan]
    grade2=[1 if ii[2]==2 else 0 for ii in kaplan]
    grade3=[1 if ii[2]==3 else 0 for ii in kaplan]
    grade4=[1 if ii[2]==4 else 0 for ii in kaplan]
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
    ## Parse the printed model for the lncrna coefficient and pvalue.
    ## coeff/pvalue are reset every iteration so a failed parse records 'NA'
    ## instead of silently reusing the previous lncrna's values.
    coeff='NA'
    pvalue='NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError:
            ## Blank lines in the printed model have no fields to split.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','HNSC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(lncrnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/HNSC/cox_regression.py",
"copies": "1",
"size": "11927",
"license": "mit",
"hash": 7812489676093152000,
"line_mean": 35.0332326284,
"line_max": 142,
"alpha_frac": 0.6519661273,
"autogenerated": false,
"ratio": 3.1157262277951934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9138600094849081,
"avg_score": 0.02581845204922257,
"num_lines": 331
} |
## A script for finding every cox coefficient and pvalue for every KIRC lncRNA in the beta MiTranscriptome data set (normalized counts)
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re

## This call will only work if you are running python from the command line.
## If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

## ---- Parse the follow-up file for survival times and vital status ----
f = open(os.path.join(BASE_DIR, 'tcga_data', 'KIRC', 'clinical',
                      'nationwidechildrens.org_clinical_follow_up_v1.0_kirc.txt'))
## get the column indexes needed
columns = f.readline().split('\t')
patient_column = columns.index('bcr_patient_barcode')
alive_column = columns.index('last_contact_days_to')
death_column = columns.index('death_days_to')
f.readline()  # skip the two extra header rows
f.readline()
data = [i.split('\t') for i in f]
f.close()

## A patient can be listed multiple times in the file.  The most recent listing
## (furthest down in the file) contains the most recent follow up data, so a
## repeated barcode overwrites the previous entry.  The sentinel entry lets the
## "same patient as previous row" test work on the first row.
## Data is: [[Patient ID, time(days), Vital status], ...]
clinical1 = [['', '', '']]
for i in data:
    if clinical1[-1][0] == i[patient_column]:
        ## a recorded death date takes precedence over a last-contact date
        if re.search('^[0-9]+$', i[death_column]):
            clinical1[-1] = [i[patient_column], int(i[death_column]), 'Dead']
        elif re.search('^[0-9]+$', i[alive_column]):
            clinical1[-1] = [i[patient_column], int(i[alive_column]), 'Alive']
    else:
        if re.search('^[0-9]+$', i[death_column]):
            clinical1.append([i[patient_column], int(i[death_column]), 'Dead'])
        elif re.search('^[0-9]+$', i[alive_column]):
            clinical1.append([i[patient_column], int(i[alive_column]), 'Alive'])
## Removing the sentinel value.
clinical = clinical1[1:]

## Grade, sex, and age information were taken from the "clinical_patient" file.
more_clinical = {}
grade_dict = {'G1': 1, 'G2': 2, 'G3': 3, 'G4': 4}
sex_dict = {'MALE': 0, 'FEMALE': 1}

## The "clinical_patient" file can also contain patients not listed in the
## follow_up file; their survival data is collected in clinical4.
f = open(os.path.join(BASE_DIR, 'tcga_data', 'KIRC', 'clinical',
                      'nationwidechildrens.org_clinical_patient_kirc.txt'))
## get the column indexes needed
columns = f.readline().split('\t')
grade_column = columns.index('tumor_grade')
sex_column = columns.index('gender')
age_column = columns.index('age_at_initial_pathologic_diagnosis')
patient_column = columns.index('bcr_patient_barcode')
alive_column = columns.index('last_contact_days_to')
death_column = columns.index('death_days_to')
f.readline()
f.readline()
clinical4 = []
data = [i.split('\t') for i in f]
f.close()
for i in data:
    ## Patients with an unknown grade, sex, or age are skipped on purpose:
    ## KeyError  -> grade/sex value not in the dictionaries (e.g. '[Not Available]')
    ## ValueError-> non-numeric age; IndexError -> short/malformed row.
    try:
        more_clinical[i[patient_column]] = [grade_dict[i[grade_column]],
                                            sex_dict[i[sex_column]],
                                            int(i[age_column])]
        if re.search('^[0-9]+$', i[death_column]):
            clinical4.append([i[patient_column], int(i[death_column]), 'Dead'])
        elif re.search('^[0-9]+$', i[alive_column]):
            clinical4.append([i[patient_column], int(i[alive_column]), 'Alive'])
    except (KeyError, ValueError, IndexError):
        pass

new_clinical = []
## It is possible that the clinical data in the clinical_patient file is more up
## to date than the follow_up file; the two sources are merged keeping, for each
## patient, the entry with the later follow-up time.
clinical_ids = [j[0] for j in clinical]  # hoisted: avoids rebuilding this list per patient (was O(n^2))
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    elif i[1] <= clinical[clinical_ids.index(i[0])][1]:
        new_clinical.append(clinical[clinical_ids.index(i[0])])
    else:
        new_clinical.append(i)
## also do the reverse since clinical can contain patients not included in clinical4
new_ids = [j[0] for j in new_clinical]
for i in clinical:
    if i[0] not in new_ids:
        new_clinical.append(i)
        new_ids.append(i[0])  # keep the membership list in sync while appending

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical = [i for i in new_clinical if i[1] > 0]

## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis], ...]
final_clinical = []
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i + more_clinical[i[0]])

## ---- Load the expression data ----
## In a separate script the mitranscriptome.expr.counts.tsv file was parsed and
## the KIRC patient and expression values extracted.  There are duplicated
## transcripts and the possibility of a patient having multiple sequencing files.
lncrna_dict = {}  # tracks transcripts already seen, to drop duplicates
## I have the list of transcripts saved in a file
f = open(os.path.join(BASE_DIR, 'lncrna', 'transcripts.txt'))
## NOTE(review): eval() of file contents executes arbitrary code; acceptable only
## because this file is produced by our own pipeline -- never point it at
## untrusted data.
transcripts = eval(f.read())
f.close()
f = open(os.path.join(BASE_DIR, 'tcga_data', 'KIRC', 'lncrna', 'KIRC.txt'))
## patient list is at the top of the file
patients = f.readline().strip().split()
lncrnas = [[]] * len(patients)
for i, j in zip(transcripts, f):
    if i not in lncrna_dict:  # skip duplicated transcripts
        data = eval(j.strip())
        for index, k in enumerate(data):
            ## rebinding (not +=) keeps the [[]]*n slots independent
            lncrnas[index] = lncrnas[index] + [[i, float(k)]]
        lncrna_dict[i] = ''
f.close()

## create a dictionary mapping patient (first 12 barcode chars) to all of their
## lncrna expression columns
patient_dict = {}
for index, i in enumerate(patients):
    patient_dict[i[:12]] = patient_dict.get(i[:12], []) + [lncrnas[index]]

## find which patients have complete clinical data, order the data, and average if necessary;
## it's possible there are expression data without clinical data and vice versa
clinical_and_files = []
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)

ordered_lncrnas = []
for i in clinical_and_files:
    temp = []
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp) == 1:
        ordered_lncrnas.append(temp[0])
    else:
        ## average the expression over a patient's multiple sequencing files
        values = []
        for k in temp:
            values.append([kk[1] for kk in k])
        mean = list(sum([np.array(kkk) for kkk in values]) / float(len(temp)))
        ## list() makes this safe on Python 3 too, where zip() is lazy
        ordered_lncrnas.append(list(zip([z[0] for z in temp[0]], mean)))

## Only want lncrnas that meet an expression cutoff.
## It is not known what expression level of lncrnas is needed for function, so a
## soft median cutoff of .1 was chosen, and no more than a fourth of the patients
## may have zero expression.
final_lncrnas = [[]] * len(ordered_lncrnas)
for i in range(len(ordered_lncrnas[0])):
    temp = []
    for j in ordered_lncrnas:
        temp.append(j[i])
    count = 0
    for k in temp:
        if k[1] == 0:
            count += 1
    median = np.median([ii[1] for ii in temp])
    if count < len(ordered_lncrnas) / 4.0 and median > .1:
        for index, kk in enumerate(temp):
            final_lncrnas[index] = final_lncrnas[index] + [kk]

## This will write the final lncrnas to a medium sized file ~10-50MB which could
## be useful for further analyses; this step can be skipped.
f = open(os.path.join(BASE_DIR, 'lncrna', 'cox', 'KIRC', 'final_lncrnas.txt'), 'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()

## ---- Perform Cox regression on all of the lncrnas in final_lncrnas ----
death_dic = {'Alive': 0, 'Dead': 1}
coeffs = []
pvalues = []
lncrnas = []  ## This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan = []
    lncrnas.append(final_lncrnas[0][i][0])
    ## clinical_and_files and final_lncrnas hold patients in the same order
    for k, j in zip(clinical_and_files, final_lncrnas):
        kaplan.append([k[1], k[2], k[3], k[4], k[5], j[i][1]])
    data = [ii[-1] for ii in kaplan]  ## all values for the lncrna being analyzed
    ro.globalenv['expression'] = ro.FloatVector(data)
    ## Perform inverse normal transformation
    res = ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)')
    inverse_norm = list(res)  ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna'] = ro.FloatVector(inverse_norm)
    ro.globalenv['times'] = ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died'] = ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## one indicator covariate per tumor grade
    ro.globalenv['grade1'] = ro.IntVector([1 if ii[2] == 1 else 0 for ii in kaplan])
    ro.globalenv['grade2'] = ro.IntVector([1 if ii[2] == 2 else 0 for ii in kaplan])
    ro.globalenv['grade3'] = ro.IntVector([1 if ii[2] == 3 else 0 for ii in kaplan])
    ro.globalenv['grade4'] = ro.IntVector([1 if ii[2] == 4 else 0 for ii in kaplan])
    ro.globalenv['sex'] = ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age'] = ro.IntVector([ii[4] for ii in kaplan])
    res = ro.r('coxph(Surv(times,died) ~ lncrna + grade1 + grade2 + grade3 + grade4 + sex + age)')  ## Perform Cox regression
    ## Parse the printed model for the lncrna coefficient and pvalue.
    ## BUGFIX: reset both per iteration -- previously a failed parse silently
    ## re-appended the previous lncrna's values (and raised NameError if the
    ## very first parse failed).
    coeff = 'NA'
    pvalue = 'NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0] == 'lncrna':
                coeff = entry.split()[1]
                pvalue = entry.split()[-1]
                break
        except IndexError:  # blank line in the R printout
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## Write the results to a tab delimited file: lncrna name, cox coefficient, pvalue.
f = open(os.path.join(BASE_DIR, 'lncrna', 'cox', 'KIRC', 'coeffs_pvalues.txt'), 'w')
for i, j, k in zip(lncrnas, coeffs, pvalues):
    f.write(i + '\t' + j + '\t' + k + '\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/KIRC/cox_regression.py",
"copies": "1",
"size": "10494",
"license": "mit",
"hash": -8114452723561608000,
"line_mean": 34.5728813559,
"line_max": 142,
"alpha_frac": 0.6591385554,
"autogenerated": false,
"ratio": 3.1093333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42684718887333334,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every KIRP lncRNA in the beta MiTranscriptome data set (normalized counts)
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re

## This call will only work if you are running python from the command line.
## If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

## ---- Parse the follow-up file for survival times and vital status ----
f = open(os.path.join(BASE_DIR, 'tcga_data', 'KIRP', 'clinical',
                      'nationwidechildrens.org_clinical_follow_up_v1.0_kirp.txt'))
## get the column indexes needed
columns = f.readline().split('\t')
patient_column = columns.index('bcr_patient_barcode')
alive_column = columns.index('last_contact_days_to')
death_column = columns.index('death_days_to')
f.readline()  # skip the two extra header rows
f.readline()
data = [i.split('\t') for i in f]
f.close()

## A patient can be listed multiple times in the file.  The most recent listing
## (furthest down in the file) contains the most recent follow up data, so a
## repeated barcode overwrites the previous entry.  The sentinel entry lets the
## "same patient as previous row" test work on the first row.
## Data is: [[Patient ID, time(days), Vital status], ...]
## NOTE: for KIRP a last-contact date is checked BEFORE a death date (the
## opposite of the KIRC script); preserved as-is.
clinical1 = [['', '', '']]
for i in data:
    if clinical1[-1][0] == i[patient_column]:
        if re.search('^[0-9]+$', i[alive_column]):
            clinical1[-1] = [i[patient_column], int(i[alive_column]), 'Alive']
        elif re.search('^[0-9]+$', i[death_column]):
            clinical1[-1] = [i[patient_column], int(i[death_column]), 'Dead']
    else:
        if re.search('^[0-9]+$', i[alive_column]):
            clinical1.append([i[patient_column], int(i[alive_column]), 'Alive'])
        elif re.search('^[0-9]+$', i[death_column]):
            clinical1.append([i[patient_column], int(i[death_column]), 'Dead'])
## Removing the sentinel value.
clinical = clinical1[1:]

## Sex and age information were taken from the "clinical_patient" file.
## KIRP has no grade information, so a constant 0 placeholder is stored where
## the other cancers keep grade.
more_clinical = {}
sex_dict = {'MALE': 0, 'FEMALE': 1}

## The "clinical_patient" file can also contain patients not listed in the
## follow_up file; their survival data is collected in clinical4.
f = open(os.path.join(BASE_DIR, 'tcga_data', 'KIRP', 'clinical',
                      'nationwidechildrens.org_clinical_patient_kirp.txt'))
## get the column indexes needed
columns = f.readline().split('\t')
sex_column = columns.index('gender')
age_column = columns.index('age_at_diagnosis')
patient_column = columns.index('bcr_patient_barcode')
alive_column = columns.index('last_contact_days_to')
death_column = columns.index('death_days_to')
f.readline()
f.readline()
data = [i.split('\t') for i in f]
f.close()
clinical4 = []
for i in data:
    ## Patients with an unknown sex or age are skipped on purpose:
    ## KeyError -> sex not in sex_dict; ValueError -> non-numeric age;
    ## IndexError -> short/malformed row.
    try:
        more_clinical[i[patient_column]] = [0, sex_dict[i[sex_column]], int(i[age_column])]
        if re.search('^[0-9]+$', i[alive_column]):
            clinical4.append([i[patient_column], int(i[alive_column]), 'Alive'])
        elif re.search('^[0-9]+$', i[death_column]):
            clinical4.append([i[patient_column], int(i[death_column]), 'Dead'])
    except (KeyError, ValueError, IndexError):
        pass

new_clinical = []
## It is possible that the clinical data in the clinical_patient file is more up
## to date than the follow_up file; the two sources are merged keeping, for each
## patient, the entry with the later follow-up time.
clinical_ids = [j[0] for j in clinical]  # hoisted: avoids rebuilding this list per patient (was O(n^2))
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    elif i[1] <= clinical[clinical_ids.index(i[0])][1]:
        new_clinical.append(clinical[clinical_ids.index(i[0])])
    else:
        new_clinical.append(i)
## also do the reverse since clinical can contain patients not included in clinical4
new_ids = [j[0] for j in new_clinical]
for i in clinical:
    if i[0] not in new_ids:
        new_clinical.append(i)
        new_ids.append(i[0])  # keep the membership list in sync while appending

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical = [i for i in new_clinical if i[1] > 0]

## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis], ...]
final_clinical = []
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i + more_clinical[i[0]])

## ---- Load the expression data ----
## In a separate script the mitranscriptome.expr.counts.tsv file was parsed and
## the KIRP patient and expression values extracted.  There are duplicated
## transcripts and the possibility of a patient having multiple sequencing files.
lncrna_dict = {}  # tracks transcripts already seen, to drop duplicates
## I have the list of transcripts saved in a file
f = open(os.path.join(BASE_DIR, 'lncrna', 'transcripts.txt'))
## NOTE(review): eval() of file contents executes arbitrary code; acceptable only
## because this file is produced by our own pipeline.
transcripts = eval(f.read())
f.close()
f = open(os.path.join(BASE_DIR, 'tcga_data', 'KIRP', 'lncrna', 'KIRP.txt'))
## patient list is at the top of the file
patients = f.readline().strip().split()
lncrnas = [[]] * len(patients)
for i, j in zip(transcripts, f):
    if i not in lncrna_dict:  # skip duplicated transcripts
        data = eval(j.strip())
        for index, k in enumerate(data):
            ## rebinding (not +=) keeps the [[]]*n slots independent
            lncrnas[index] = lncrnas[index] + [[i, float(k)]]
        lncrna_dict[i] = ''
f.close()

## create a dictionary mapping patient (first 12 barcode chars) to all of their
## lncrna expression columns
patient_dict = {}
for index, i in enumerate(patients):
    patient_dict[i[:12]] = patient_dict.get(i[:12], []) + [lncrnas[index]]

## find which patients have complete clinical data, order the data, and average if necessary;
## it's possible there are expression data without clinical data and vice versa
clinical_and_files = []
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)

ordered_lncrnas = []
for i in clinical_and_files:
    temp = []
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp) == 1:
        ordered_lncrnas.append(temp[0])
    else:
        ## average the expression over a patient's multiple sequencing files
        values = []
        for k in temp:
            values.append([kk[1] for kk in k])
        mean = list(sum([np.array(kkk) for kkk in values]) / float(len(temp)))
        ## list() makes this safe on Python 3 too, where zip() is lazy
        ordered_lncrnas.append(list(zip([z[0] for z in temp[0]], mean)))

## Only want lncrnas that meet an expression cutoff.
## A soft median cutoff of .1 was chosen, and no more than a fourth of the
## patients may have zero expression.
final_lncrnas = [[]] * len(ordered_lncrnas)
for i in range(len(ordered_lncrnas[0])):
    temp = []
    for j in ordered_lncrnas:
        temp.append(j[i])
    count = 0
    for k in temp:
        if k[1] == 0:
            count += 1
    median = np.median([ii[1] for ii in temp])
    if count < len(ordered_lncrnas) / 4.0 and median > .1:
        for index, kk in enumerate(temp):
            final_lncrnas[index] = final_lncrnas[index] + [kk]

## This will write the final lncrnas to a medium sized file ~10-50MB which could
## be useful for further analyses; this step can be skipped.
f = open(os.path.join(BASE_DIR, 'lncrna', 'cox', 'KIRP', 'final_lncrnas.txt'), 'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()

## ---- Perform Cox regression on all of the lncrnas in final_lncrnas ----
death_dic = {'Alive': 0, 'Dead': 1}
coeffs = []
pvalues = []
lncrnas = []  ## This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan = []
    lncrnas.append(final_lncrnas[0][i][0])
    ## clinical_and_files and final_lncrnas hold patients in the same order
    for k, j in zip(clinical_and_files, final_lncrnas):
        kaplan.append([k[1], k[2], k[3], k[4], k[5], j[i][1]])
    data = [ii[-1] for ii in kaplan]  ## all values for the lncrna being analyzed
    ro.globalenv['expression'] = ro.FloatVector(data)
    ## Perform inverse normal transformation
    res = ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)')
    inverse_norm = list(res)  ## Convert robject to python list
    ## Prepare the variables for rpy2 (no grade covariates for KIRP)
    ro.globalenv['lncrna'] = ro.FloatVector(inverse_norm)
    ro.globalenv['times'] = ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died'] = ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex'] = ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age'] = ro.IntVector([ii[4] for ii in kaplan])
    res = ro.r('coxph(Surv(times,died) ~ lncrna + sex + age)')  ## Perform Cox regression
    ## Parse the printed model for the lncrna coefficient and pvalue.
    ## BUGFIX: reset both per iteration -- previously a failed parse silently
    ## re-appended the previous lncrna's values (and raised NameError if the
    ## very first parse failed).
    coeff = 'NA'
    pvalue = 'NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0] == 'lncrna':
                coeff = entry.split()[1]
                pvalue = entry.split()[-1]
                break
        except IndexError:  # blank line in the R printout
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## Write the results to a tab delimited file: lncrna name, cox coefficient, pvalue.
f = open(os.path.join(BASE_DIR, 'lncrna', 'cox', 'KIRP', 'coeffs_pvalues.txt'), 'w')
for i, j, k in zip(lncrnas, coeffs, pvalues):
    f.write(i + '\t' + j + '\t' + k + '\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/KIRP/cox_regression.py",
"copies": "1",
"size": "9448",
"license": "mit",
"hash": 1534683241645135000,
"line_mean": 36.4920634921,
"line_max": 142,
"alpha_frac": 0.6730524979,
"autogenerated": false,
"ratio": 3.107894736842105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4280947234742105,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every LAML lncRNA in the beta MiTranscriptome data set (normalized counts)
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re

## This call will only work if you are running python from the command line.
## If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

## LAML did not contain a follow_up file, which might explain why cbioportal
## didn't allow kaplans for this cancer for a period of time; all clinical data
## comes from the clinical_patient file.
## Sex and age information: a constant 0 placeholder is stored where the other
## cancers keep grade.
clinical4 = []
more_clinical = {}
sex_dict = {'MALE': 0, 'FEMALE': 1}

f = open(os.path.join(BASE_DIR, 'tcga_data', 'LAML', 'clinical',
                      'nationwidechildrens.org_clinical_patient_laml.txt'))
## get the column indexes needed
columns = f.readline().split('\t')
sex_column = columns.index('gender')
age_column = columns.index('age_at_diagnosis')
patient_column = columns.index('bcr_patient_barcode')
alive_column = columns.index('last_contact_days_to')
death_column = columns.index('death_days_to')
f.readline()  # skip the two extra header rows
f.readline()
data = [i.split('\t') for i in f]
f.close()
for i in data:
    ## Patients with an unknown sex or age are skipped on purpose:
    ## KeyError -> sex not in sex_dict; ValueError -> non-numeric age;
    ## IndexError -> short/malformed row.
    try:
        more_clinical[i[patient_column]] = [0, sex_dict[i[sex_column]], int(i[age_column])]
        if re.search('^[0-9]+$', i[alive_column]):
            clinical4.append([i[patient_column], int(i[alive_column]), 'Alive'])
        elif re.search('^[0-9]+$', i[death_column]):
            clinical4.append([i[patient_column], int(i[death_column]), 'Dead'])
    except (KeyError, ValueError, IndexError):
        pass

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical = [i for i in clinical4 if i[1] > 0]

## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis], ...]
final_clinical = []
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i + more_clinical[i[0]])

## ---- Load the expression data ----
## In a separate script the mitranscriptome.expr.counts.tsv file was parsed and
## the LAML patient and expression values extracted.  There are duplicated
## transcripts and the possibility of a patient having multiple sequencing files.
lncrna_dict = {}  # tracks transcripts already seen, to drop duplicates
## I have the list of transcripts saved in a file
f = open(os.path.join(BASE_DIR, 'lncrna', 'transcripts.txt'))
## NOTE(review): eval() of file contents executes arbitrary code; acceptable only
## because this file is produced by our own pipeline.
transcripts = eval(f.read())
f.close()
f = open(os.path.join(BASE_DIR, 'tcga_data', 'LAML', 'lncrna', 'LAML.txt'))
## patient list is at the top of the file
patients = f.readline().strip().split()
lncrnas = [[]] * len(patients)
for i, j in zip(transcripts, f):
    if i not in lncrna_dict:  # skip duplicated transcripts
        data = eval(j.strip())
        for index, k in enumerate(data):
            ## rebinding (not +=) keeps the [[]]*n slots independent
            lncrnas[index] = lncrnas[index] + [[i, float(k)]]
        lncrna_dict[i] = ''
f.close()

## create a dictionary mapping patient (first 12 barcode chars) to all of their
## lncrna expression columns
patient_dict = {}
for index, i in enumerate(patients):
    patient_dict[i[:12]] = patient_dict.get(i[:12], []) + [lncrnas[index]]

## find which patients have complete clinical data, order the data, and average if necessary;
## it's possible there are expression data without clinical data and vice versa
clinical_and_files = []
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)

ordered_lncrnas = []
for i in clinical_and_files:
    temp = []
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp) == 1:
        ordered_lncrnas.append(temp[0])
    else:
        ## average the expression over a patient's multiple sequencing files
        values = []
        for k in temp:
            values.append([kk[1] for kk in k])
        mean = list(sum([np.array(kkk) for kkk in values]) / float(len(temp)))
        ## list() makes this safe on Python 3 too, where zip() is lazy
        ordered_lncrnas.append(list(zip([z[0] for z in temp[0]], mean)))

## Only want lncrnas that meet an expression cutoff.
## A soft median cutoff of .1 was chosen, and no more than a fourth of the
## patients may have zero expression.
final_lncrnas = [[]] * len(ordered_lncrnas)
for i in range(len(ordered_lncrnas[0])):
    temp = []
    for j in ordered_lncrnas:
        temp.append(j[i])
    count = 0
    for k in temp:
        if k[1] == 0:
            count += 1
    median = np.median([ii[1] for ii in temp])
    if count < len(ordered_lncrnas) / 4.0 and median > .1:
        for index, kk in enumerate(temp):
            final_lncrnas[index] = final_lncrnas[index] + [kk]

## This will write the final lncrnas to a medium sized file ~10-50MB which could
## be useful for further analyses; this step can be skipped.
f = open(os.path.join(BASE_DIR, 'lncrna', 'cox', 'LAML', 'final_lncrnas.txt'), 'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()

## ---- Perform Cox regression on all of the lncrnas in final_lncrnas ----
death_dic = {'Alive': 0, 'Dead': 1}
coeffs = []
pvalues = []
lncrnas = []  ## This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan = []
    lncrnas.append(final_lncrnas[0][i][0])
    ## clinical_and_files and final_lncrnas hold patients in the same order
    for k, j in zip(clinical_and_files, final_lncrnas):
        kaplan.append([k[1], k[2], k[3], k[4], k[5], j[i][1]])
    data = [ii[-1] for ii in kaplan]  ## all values for the lncrna being analyzed
    ro.globalenv['expression'] = ro.FloatVector(data)
    ## Perform inverse normal transformation
    res = ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)')
    inverse_norm = list(res)  ## Convert robject to python list
    ## Prepare the variables for rpy2 (no grade covariates for LAML)
    ro.globalenv['lncrna'] = ro.FloatVector(inverse_norm)
    ro.globalenv['times'] = ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died'] = ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex'] = ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age'] = ro.IntVector([ii[4] for ii in kaplan])
    res = ro.r('coxph(Surv(times,died) ~ lncrna + sex + age)')  ## Perform Cox regression
    ## Parse the printed model for the lncrna coefficient and pvalue.
    ## BUGFIX: reset both per iteration -- previously a failed parse silently
    ## re-appended the previous lncrna's values (and raised NameError if the
    ## very first parse failed).
    coeff = 'NA'
    pvalue = 'NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0] == 'lncrna':
                coeff = entry.split()[1]
                pvalue = entry.split()[-1]
                break
        except IndexError:  # blank line in the R printout
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## Write the results to a tab delimited file: lncrna name, cox coefficient, pvalue.
f = open(os.path.join(BASE_DIR, 'lncrna', 'cox', 'LAML', 'coeffs_pvalues.txt'), 'w')
for i, j, k in zip(lncrnas, coeffs, pvalues):
    f.write(i + '\t' + j + '\t' + k + '\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/LAML/cox_regression.py",
"copies": "1",
"size": "7267",
"license": "mit",
"hash": -5186421158353493000,
"line_mean": 35.5175879397,
"line_max": 142,
"alpha_frac": 0.681299023,
"autogenerated": false,
"ratio": 3.130978026712624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4312277049712624,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every LGG lncRNA in the beta MiTranscriptome data set (normalized counts)
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re

## This call will only work if you are running python from the command line.
## If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

## ---- Parse the follow-up file for survival times and vital status ----
f = open(os.path.join(BASE_DIR, 'tcga_data', 'LGG', 'clinical',
                      'nationwidechildrens.org_clinical_follow_up_v1.0_lgg.txt'))
## get the column indexes needed
columns = f.readline().split('\t')
patient_column = columns.index('bcr_patient_barcode')
alive_column = columns.index('last_contact_days_to')
death_column = columns.index('death_days_to')
f.readline()  # skip the two extra header rows
f.readline()
data = [i.split('\t') for i in f]
f.close()

## A patient can be listed multiple times in the file.  The most recent listing
## (furthest down in the file) contains the most recent follow up data, so a
## repeated barcode overwrites the previous entry.  The sentinel entry lets the
## "same patient as previous row" test work on the first row.
## Data is: [[Patient ID, time(days), Vital status], ...]
clinical1 = [['', '', '']]
for i in data:
    if clinical1[-1][0] == i[patient_column]:
        ## a recorded death date takes precedence over a last-contact date
        if re.search('^[0-9]+$', i[death_column]):
            clinical1[-1] = [i[patient_column], int(i[death_column]), 'Dead']
        elif re.search('^[0-9]+$', i[alive_column]):
            clinical1[-1] = [i[patient_column], int(i[alive_column]), 'Alive']
    else:
        if re.search('^[0-9]+$', i[death_column]):
            clinical1.append([i[patient_column], int(i[death_column]), 'Dead'])
        elif re.search('^[0-9]+$', i[alive_column]):
            clinical1.append([i[patient_column], int(i[alive_column]), 'Alive'])
## Removing the sentinel value.
clinical = clinical1[1:]

## Grade, sex, and age information were taken from the "clinical_patient" file.
## LGG only has grades G2 and G3.
more_clinical = {}
grade_dict = {'G2': 2, 'G3': 3}
sex_dict = {'MALE': 0, 'FEMALE': 1}

## The "clinical_patient" file can also contain patients not listed in the
## follow_up file; their survival data is collected in clinical4.
f = open(os.path.join(BASE_DIR, 'tcga_data', 'LGG', 'clinical',
                      'nationwidechildrens.org_clinical_patient_lgg.txt'))
## get the column indexes needed
columns = f.readline().split('\t')
grade_column = columns.index('tumor_grade')
sex_column = columns.index('gender')
age_column = columns.index('age_at_initial_pathologic_diagnosis')
patient_column = columns.index('bcr_patient_barcode')
alive_column = columns.index('last_contact_days_to')
death_column = columns.index('death_days_to')
f.readline()
f.readline()
clinical4 = []
data = [i.split('\t') for i in f]
f.close()
for i in data:
    ## Patients with an unknown grade, sex, or age are skipped on purpose:
    ## KeyError  -> grade/sex value not in the dictionaries
    ## ValueError-> non-numeric age; IndexError -> short/malformed row.
    try:
        more_clinical[i[patient_column]] = [grade_dict[i[grade_column]],
                                            sex_dict[i[sex_column]],
                                            int(i[age_column])]
        if re.search('^[0-9]+$', i[death_column]):
            clinical4.append([i[patient_column], int(i[death_column]), 'Dead'])
        elif re.search('^[0-9]+$', i[alive_column]):
            clinical4.append([i[patient_column], int(i[alive_column]), 'Alive'])
    except (KeyError, ValueError, IndexError):
        pass

new_clinical = []
## It is possible that the clinical data in the clinical_patient file is more up
## to date than the follow_up file; the two sources are merged keeping, for each
## patient, the entry with the later follow-up time.
clinical_ids = [j[0] for j in clinical]  # hoisted: avoids rebuilding this list per patient (was O(n^2))
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    elif i[1] <= clinical[clinical_ids.index(i[0])][1]:
        new_clinical.append(clinical[clinical_ids.index(i[0])])
    else:
        new_clinical.append(i)
## also do the reverse since clinical can contain patients not included in clinical4
new_ids = [j[0] for j in new_clinical]
for i in clinical:
    if i[0] not in new_ids:
        new_clinical.append(i)
        new_ids.append(i[0])  # keep the membership list in sync while appending

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical = [i for i in new_clinical if i[1] > 0]

## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis], ...]
final_clinical = []
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i + more_clinical[i[0]])

## ---- Load the expression data ----
## In a separate script the mitranscriptome.expr.counts.tsv file was parsed and
## the LGG patient and expression values extracted.  There are duplicated
## transcripts and the possibility of a patient having multiple sequencing files.
lncrna_dict = {}  # tracks transcripts already seen, to drop duplicates
## I have the list of transcripts saved in a file
f = open(os.path.join(BASE_DIR, 'lncrna', 'transcripts.txt'))
## NOTE(review): eval() of file contents executes arbitrary code; acceptable only
## because this file is produced by our own pipeline.
transcripts = eval(f.read())
f.close()
f = open(os.path.join(BASE_DIR, 'tcga_data', 'LGG', 'lncrna', 'LGG.txt'))
## patient list is at the top of the file
patients = f.readline().strip().split()
lncrnas = [[]] * len(patients)
for i, j in zip(transcripts, f):
    if i not in lncrna_dict:  # skip duplicated transcripts
        data = eval(j.strip())
        for index, k in enumerate(data):
            ## rebinding (not +=) keeps the [[]]*n slots independent
            lncrnas[index] = lncrnas[index] + [[i, float(k)]]
        lncrna_dict[i] = ''
f.close()

## create a dictionary mapping patient (first 12 barcode chars) to all of their
## lncrna expression columns
patient_dict = {}
for index, i in enumerate(patients):
    patient_dict[i[:12]] = patient_dict.get(i[:12], []) + [lncrnas[index]]

## find which patients have complete clinical data, order the data, and average if necessary;
## it's possible there are expression data without clinical data and vice versa
clinical_and_files = []
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)

ordered_lncrnas = []
for i in clinical_and_files:
    temp = []
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp) == 1:
        ordered_lncrnas.append(temp[0])
    else:
        ## average the expression over a patient's multiple sequencing files
        values = []
        for k in temp:
            values.append([kk[1] for kk in k])
        mean = list(sum([np.array(kkk) for kkk in values]) / float(len(temp)))
        ## list() makes this safe on Python 3 too, where zip() is lazy
        ordered_lncrnas.append(list(zip([z[0] for z in temp[0]], mean)))

## Only want lncrnas that meet an expression cutoff.
## A soft median cutoff of .1 was chosen, and no more than a fourth of the
## patients may have zero expression.
final_lncrnas = [[]] * len(ordered_lncrnas)
for i in range(len(ordered_lncrnas[0])):
    temp = []
    for j in ordered_lncrnas:
        temp.append(j[i])
    count = 0
    for k in temp:
        if k[1] == 0:
            count += 1
    median = np.median([ii[1] for ii in temp])
    if count < len(ordered_lncrnas) / 4.0 and median > .1:
        for index, kk in enumerate(temp):
            final_lncrnas[index] = final_lncrnas[index] + [kk]

## This will write the final lncrnas to a medium sized file ~10-50MB which could
## be useful for further analyses; this step can be skipped.
f = open(os.path.join(BASE_DIR, 'lncrna', 'cox', 'LGG', 'final_lncrnas.txt'), 'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()

## ---- Perform Cox regression on all of the lncrnas in final_lncrnas ----
death_dic = {'Alive': 0, 'Dead': 1}
coeffs = []
pvalues = []
lncrnas = []  ## This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan = []
    lncrnas.append(final_lncrnas[0][i][0])
    ## clinical_and_files and final_lncrnas hold patients in the same order
    for k, j in zip(clinical_and_files, final_lncrnas):
        kaplan.append([k[1], k[2], k[3], k[4], k[5], j[i][1]])
    data = [ii[-1] for ii in kaplan]  ## all values for the lncrna being analyzed
    ro.globalenv['expression'] = ro.FloatVector(data)
    ## Perform inverse normal transformation
    res = ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)')
    inverse_norm = list(res)  ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna'] = ro.FloatVector(inverse_norm)
    ro.globalenv['times'] = ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died'] = ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## indicator covariates for the two LGG grades
    ro.globalenv['grade2'] = ro.IntVector([1 if ii[2] == 2 else 0 for ii in kaplan])
    ro.globalenv['grade3'] = ro.IntVector([1 if ii[2] == 3 else 0 for ii in kaplan])
    ro.globalenv['sex'] = ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age'] = ro.IntVector([ii[4] for ii in kaplan])
    res = ro.r('coxph(Surv(times,died) ~ lncrna + grade2 + grade3 + sex + age)')  ## Perform Cox regression
    ## Parse the printed model for the lncrna coefficient and pvalue.
    ## BUGFIX: reset both per iteration -- previously a failed parse silently
    ## re-appended the previous lncrna's values (and raised NameError if the
    ## very first parse failed).
    coeff = 'NA'
    pvalue = 'NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0] == 'lncrna':
                coeff = entry.split()[1]
                pvalue = entry.split()[-1]
                break
        except IndexError:  # blank line in the R printout
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## Write the results to a tab delimited file: lncrna name, cox coefficient, pvalue.
f = open(os.path.join(BASE_DIR, 'lncrna', 'cox', 'LGG', 'coeffs_pvalues.txt'), 'w')
for i, j, k in zip(lncrnas, coeffs, pvalues):
    f.write(i + '\t' + j + '\t' + k + '\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/LGG/cox_regression.py",
"copies": "1",
"size": "10067",
"license": "mit",
"hash": -7772444224204125000,
"line_mean": 35.082437276,
"line_max": 142,
"alpha_frac": 0.6656402106,
"autogenerated": false,
"ratio": 3.120582765034098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42862229756340975,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every LIHC lncRNA in the beta MiTranscriptome data set (normalized counts)
##load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')  ## load the R survival package needed for coxph() below
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## BASE_DIR is four directory levels above this script (the repository root).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_lihc.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## the next two rows are secondary header/CDE rows, not data -- skip them
f.readline()
f.readline()
data=[i.split('\t') for i in f]  ## remaining rows as lists of fields
## A patient can appear on multiple rows; rows further down the file hold the
## most recent follow up, so a repeated patient overwrites the previous entry.
## Rows lacking a usable (all-digit) day count are ignored.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]  ## sentinel so clinical1[-1] is always a valid lookup
for row in data:
    pid=row[patient_column]
    entry=None
    if re.search('^[0-9]+$',row[alive_column]):
        entry=[pid,int(row[alive_column]),'Alive']
    elif re.search('^[0-9]+$',row[death_column]):
        entry=[pid,int(row[death_column]),'Dead']
    if entry is None:
        continue
    if clinical1[-1][0]==pid:
        clinical1[-1]=entry  ## same patient again: keep the newer record
    else:
        clinical1.append(entry)
## Drop the sentinel.
clinical=clinical1[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
## map TCGA tumor grade codes to integers 1-4
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
## encode sex as 0/1 for the regression
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','clinical','nationwidechildrens.org_clinical_patient_lihc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## skip the two secondary header rows
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
## Collect grade/sex/age for every patient in the clinical_patient file and,
## when the row also carries usable follow up days, a [patient, days, status]
## record. Rows missing any of grade, sex, or age are skipped entirely.
for i in data:
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    ## Narrowed from a bare except: only the expected failures are swallowed
    ## (unknown grade/sex code -> KeyError, non-numeric age -> ValueError,
    ## short row -> IndexError); anything else now surfaces instead of hiding.
    except (KeyError, ValueError, IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Hoist the id list: the original rebuilt [j[0] for j in clinical] on every
## iteration (and called .index on the fresh copy), making the merge
## needlessly quadratic. clinical is not modified in this loop, so one copy
## with identical first-occurrence .index semantics suffices.
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        existing=clinical[clinical_ids.index(i[0])]
        ## keep whichever record has the longer follow up time
        if i[1]<=existing[1]:
            new_clinical.append(existing)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## A growing set mirrors the original's re-scan of new_clinical each pass.
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the GBM patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
## NOTE(review): eval() executes whatever is in the file -- acceptable only for locally generated data
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','lncrna','LIHC.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## every entry aliases the same empty list, but the loop below rebinds with +, so no shared-state bug
lncrnas=[[]]*len(patients)
for i,j in zip(transcripts,f):
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index]=lncrnas[index]+[[i,float(k)]]
        lncrna_dict[i]=''
##create a dictionary mapping patient to all of their lncrna expression data
## keyed by the first 12 characters of the barcode (the patient id)
patient_dict={}
for index, i in enumerate(patients):
    patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)
ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        ## multiple expression files for one patient: average the values elementwise
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## NOTE(review): the zip() result is indexed later, so this script assumes Python 2 (zip returns a list)
        ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only keep lncrnas that meet an expression cutoff.
## It is not known what expression level of lncrnas is needed for function, so a soft median value was chosen.
## Zeros still count toward the median, so an all-zero transcript is excluded.
## Cutoff: median > .1 and fewer than a fourth of the patients with zero expression.
final_lncrnas=[[]]*len(ordered_lncrnas)  ## entries rebound with + below, so the shared [] is harmless
n_patients=len(ordered_lncrnas)
for col in range(len(ordered_lncrnas[0])):
    column=[patient_row[col] for patient_row in ordered_lncrnas]  ## [name, value] pairs for one transcript
    zero_count=sum(1 for pair in column if pair[1]==0)
    median=np.median([pair[1] for pair in column])
    if zero_count<n_patients/4.0 and median>.1:
        for idx, pair in enumerate(column):
            final_lncrnas[idx]=final_lncrnas[idx]+[pair]
## Dump the surviving lncrnas (a ~10-50MB file useful for further analyses; this step can be skipped).
out=open(os.path.join(BASE_DIR,'lncrna','cox','LIHC','final_lncrnas.txt'),'w')
for patient_row in final_lncrnas:
    out.write(str(patient_row))
    out.write('\n')
out.close()
##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Dummy-code tumor grade (position 2 of each kaplan row) as 0/1 indicators.
    grade1=[1 if ii[2]==1 else 0 for ii in kaplan]
    grade2=[1 if ii[2]==2 else 0 for ii in kaplan]
    grade3=[1 if ii[2]==3 else 0 for ii in kaplan]
    grade4=[1 if ii[2]==4 else 0 for ii in kaplan]
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
    ## Initialize before parsing: the original carried the previous
    ## transcript's coeff/pvalue forward whenever the 'lncrna' row could not
    ## be found in the printed model, silently mislabeling results.
    coeff='NA'
    pvalue='NA'
    ## Parse the printed R model summary for the lncrna coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError: ## blank lines in the summary have no fields
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','LIHC','coeffs_pvalues.txt'),'w')
for name,coeff,pvalue in zip(lncrnas,coeffs,pvalues):
    f.write(name)
    f.write('\t')
    f.write(coeff)
    f.write('\t')
    f.write(pvalue)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/LIHC/cox_regression.py",
"copies": "1",
"size": "10519",
"license": "mit",
"hash": -7833376876707980000,
"line_mean": 33.6019736842,
"line_max": 142,
"alpha_frac": 0.658237475,
"autogenerated": false,
"ratio": 3.1213649851632046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9156488651517911,
"avg_score": 0.02462276172905875,
"num_lines": 304
} |
## A script for finding every cox coefficient and pvalue for every LUAD lncRNA in the beta MiTranscriptome data set (normalized counts)
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')  ## load the R survival package needed for coxph() below
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## BASE_DIR is four directory levels above this script (the repository root).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_luad.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## the next two rows are secondary header/CDE rows, not data -- skip them
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can appear on multiple rows; rows further down the file hold the
## most recent follow up, so a repeated patient overwrites the previous entry.
## Note: here a numeric death date takes priority over a last-contact date.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]  ## sentinel so clinical1[-1] is always a valid lookup
for row in data:
    pid=row[patient_column]
    entry=None
    if re.search('^[0-9]+$',row[death_column]):
        entry=[pid,int(row[death_column]),'Dead']
    elif re.search('^[0-9]+$',row[alive_column]):
        entry=[pid,int(row[alive_column]),'Alive']
    if entry is None:
        continue
    if clinical1[-1][0]==pid:
        clinical1[-1]=entry  ## same patient again: keep the newer record
    else:
        clinical1.append(entry)
## Drop the sentinel.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
## encode sex as 0/1 for the regression
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','clinical','nationwidechildrens.org_clinical_patient_luad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## skip the two secondary header rows
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
## Collect sex/age for every patient (the leading 0 is a grade placeholder --
## LUAD has no grade covariate) plus a follow up record when days are usable.
for i in data:
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    ## Narrowed from a bare except: only the expected failures are swallowed
    ## (unknown sex code -> KeyError, non-numeric age -> ValueError,
    ## short row -> IndexError); anything else now surfaces instead of hiding.
    except (KeyError, ValueError, IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Hoist the id list: the original rebuilt [j[0] for j in clinical] on every
## iteration (and called .index on the fresh copy), making the merge
## needlessly quadratic. clinical is not modified in this loop, so one copy
## with identical first-occurrence .index semantics suffices.
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        existing=clinical[clinical_ids.index(i[0])]
        ## keep whichever record has the longer follow up time
        if i[1]<=existing[1]:
            new_clinical.append(existing)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## A growing set mirrors the original's re-scan of new_clinical each pass.
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the GBM patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
## NOTE(review): eval() executes whatever is in the file -- acceptable only for locally generated data
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','lncrna','LUAD.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## every entry aliases the same empty list, but the loop below rebinds with +, so no shared-state bug
lncrnas=[[]]*len(patients)
for i,j in zip(transcripts,f):
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index]=lncrnas[index]+[[i,float(k)]]
        lncrna_dict[i]=''
##create a dictionary mapping patient to all of their lncrna expression data
## keyed by the first 12 characters of the barcode (the patient id)
patient_dict={}
for index, i in enumerate(patients):
    patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)
ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        ## multiple expression files for one patient: average the values elementwise
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## NOTE(review): the zip() result is indexed later, so this script assumes Python 2 (zip returns a list)
        ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only keep lncrnas that meet an expression cutoff.
## It is not known what expression level of lncrnas is needed for function, so a soft median value was chosen.
## Zeros still count toward the median, so an all-zero transcript is excluded.
## Cutoff: median > .1 and fewer than a fourth of the patients with zero expression.
final_lncrnas=[[]]*len(ordered_lncrnas)  ## entries rebound with + below, so the shared [] is harmless
n_patients=len(ordered_lncrnas)
for col in range(len(ordered_lncrnas[0])):
    column=[patient_row[col] for patient_row in ordered_lncrnas]  ## [name, value] pairs for one transcript
    zero_count=sum(1 for pair in column if pair[1]==0)
    median=np.median([pair[1] for pair in column])
    if zero_count<n_patients/4.0 and median>.1:
        for idx, pair in enumerate(column):
            final_lncrnas[idx]=final_lncrnas[idx]+[pair]
## Dump the surviving lncrnas (a ~10-50MB file useful for further analyses; this step can be skipped).
out=open(os.path.join(BASE_DIR,'lncrna','cox','LUAD','final_lncrnas.txt'),'w')
for patient_row in final_lncrnas:
    out.write(str(patient_row))
    out.write('\n')
out.close()
##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + sex + age)') ## Perform Cox regression
    ## Initialize before parsing: the original carried the previous
    ## transcript's coeff/pvalue forward whenever the 'lncrna' row could not
    ## be found in the printed model, silently mislabeling results.
    coeff='NA'
    pvalue='NA'
    ## Parse the printed R model summary for the lncrna coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError: ## blank lines in the summary have no fields
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','LUAD','coeffs_pvalues.txt'),'w')
for name,coeff,pvalue in zip(lncrnas,coeffs,pvalues):
    f.write(name)
    f.write('\t')
    f.write(coeff)
    f.write('\t')
    f.write(pvalue)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/LUAD/cox_regression.py",
"copies": "1",
"size": "9507",
"license": "mit",
"hash": -8872846751967021000,
"line_mean": 36.4291338583,
"line_max": 142,
"alpha_frac": 0.6731881771,
"autogenerated": false,
"ratio": 3.124219520210319,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9178847827095018,
"avg_score": 0.023711974043060184,
"num_lines": 254
} |
## A script for finding every cox coefficient and pvalue for every LUSC lncRNA in the beta MiTranscriptome data set (normalized counts)
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')  ## load the R survival package needed for coxph() below
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## BASE_DIR is four directory levels above this script (the repository root).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lusc.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## the next two rows are secondary header/CDE rows, not data -- skip them
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can appear on multiple rows; rows further down the file hold the
## most recent follow up, so a repeated patient overwrites the previous entry.
## Note: here a numeric death date takes priority over a last-contact date.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]  ## sentinel so clinical1[-1] is always a valid lookup
for row in data:
    pid=row[patient_column]
    entry=None
    if re.search('^[0-9]+$',row[death_column]):
        entry=[pid,int(row[death_column]),'Dead']
    elif re.search('^[0-9]+$',row[alive_column]):
        entry=[pid,int(row[alive_column]),'Alive']
    if entry is None:
        continue
    if clinical1[-1][0]==pid:
        clinical1[-1]=entry  ## same patient again: keep the newer record
    else:
        clinical1.append(entry)
## Drop the sentinel.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
## encode sex as 0/1 for the regression
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_patient_lusc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## skip the two secondary header rows
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
## Collect sex/age for every patient (the leading 0 is a grade placeholder --
## LUSC has no grade covariate) plus a follow up record when days are usable.
for i in data:
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    ## Narrowed from a bare except: only the expected failures are swallowed
    ## (unknown sex code -> KeyError, non-numeric age -> ValueError,
    ## short row -> IndexError); anything else now surfaces instead of hiding.
    except (KeyError, ValueError, IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Hoist the id list: the original rebuilt [j[0] for j in clinical] on every
## iteration (and called .index on the fresh copy), making the merge
## needlessly quadratic. clinical is not modified in this loop, so one copy
## with identical first-occurrence .index semantics suffices.
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        existing=clinical[clinical_ids.index(i[0])]
        ## keep whichever record has the longer follow up time
        if i[1]<=existing[1]:
            new_clinical.append(existing)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## A growing set mirrors the original's re-scan of new_clinical each pass.
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the GBM patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
## NOTE(review): eval() executes whatever is in the file -- acceptable only for locally generated data
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','lncrna','LUSC.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## every entry aliases the same empty list, but the loop below rebinds with +, so no shared-state bug
lncrnas=[[]]*len(patients)
for i,j in zip(transcripts,f):
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index]=lncrnas[index]+[[i,float(k)]]
        lncrna_dict[i]=''
##create a dictionary mapping patient to all of their lncrna expression data
## keyed by the first 12 characters of the barcode (the patient id)
patient_dict={}
for index, i in enumerate(patients):
    patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)
ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        ## multiple expression files for one patient: average the values elementwise
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## NOTE(review): the zip() result is indexed later, so this script assumes Python 2 (zip returns a list)
        ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only keep lncrnas that meet an expression cutoff.
## It is not known what expression level of lncrnas is needed for function, so a soft median value was chosen.
## Zeros still count toward the median, so an all-zero transcript is excluded.
## Cutoff: median > .1 and fewer than a fourth of the patients with zero expression.
final_lncrnas=[[]]*len(ordered_lncrnas)  ## entries rebound with + below, so the shared [] is harmless
n_patients=len(ordered_lncrnas)
for col in range(len(ordered_lncrnas[0])):
    column=[patient_row[col] for patient_row in ordered_lncrnas]  ## [name, value] pairs for one transcript
    zero_count=sum(1 for pair in column if pair[1]==0)
    median=np.median([pair[1] for pair in column])
    if zero_count<n_patients/4.0 and median>.1:
        for idx, pair in enumerate(column):
            final_lncrnas[idx]=final_lncrnas[idx]+[pair]
## Dump the surviving lncrnas (a ~10-50MB file useful for further analyses; this step can be skipped).
out=open(os.path.join(BASE_DIR,'lncrna','cox','LUSC','final_lncrnas.txt'),'w')
for patient_row in final_lncrnas:
    out.write(str(patient_row))
    out.write('\n')
out.close()
##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + sex + age)') ## Perform Cox regression
    ## Initialize before parsing: the original carried the previous
    ## transcript's coeff/pvalue forward whenever the 'lncrna' row could not
    ## be found in the printed model, silently mislabeling results.
    coeff='NA'
    pvalue='NA'
    ## Parse the printed R model summary for the lncrna coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError: ## blank lines in the summary have no fields
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','LUSC','coeffs_pvalues.txt'),'w')
for name,coeff,pvalue in zip(lncrnas,coeffs,pvalues):
    f.write(name)
    f.write('\t')
    f.write(coeff)
    f.write('\t')
    f.write(pvalue)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/LUSC/cox_regression.py",
"copies": "1",
"size": "9500",
"license": "mit",
"hash": -7051402246589650000,
"line_mean": 36.2549019608,
"line_max": 142,
"alpha_frac": 0.6736842105,
"autogenerated": false,
"ratio": 3.115775664152181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4289459874652181,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in BLCA Tier 3 data downloaded Jan. 6th 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')  ## load the R survival package needed for coxph() below
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## BASE_DIR is four directory levels above this script (the repository root).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Read the follow up data
## It was found that the v4.0 file contained more recent follow up data than v2.0, but the files contained nonredundant patients.
## So both files are loaded with the v4.0 getting preference.
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_blca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## the next two rows are secondary header/CDE rows, not data -- skip them
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
elif re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
else:
pass
else:
if re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_blca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
if i[patient_column] not in [j[0] for j in clinical]:
if clinical2[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[alive_column]):
clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
elif re.search('^[0-9]+$',i[death_column]):
clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
else:
pass
else:
if re.search('^[0-9]+$',i[alive_column]):
clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
## Removing the empty value and combining the lists.
clinical+=clinical2[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
grade_dict={}
grade_dict['High Grade']=1
grade_dict['Low Grade']=0
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_patient_blca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('neoplasm_histologic_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
except:
pass
new_clinical=[]
####It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
####All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
####also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
#### only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
#### A new list containing both follow up times and grade, sex, and age is constructed.
#### Only patients with grade, sex, and age information are included.
#### Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
##
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
##normalized files were used
if 'isoform.quantification' in i[0]:
if i[1].split('-')[3][:-1]=='01':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
if TCGA_to_mirna.has_key(i[0]):
## The miRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
else:
pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
temp=[]
for j in i[-1]:
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','mirna',j.split('.txt')[0]+'new.txt'))
mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
## In the case that the patient only contained 1 primary tumor miRNA file.
if len(temp)==1:
mirnas.append(temp[0])
## If the patient contained more than 1 primary tumor miRNA file
## this list comprehension will average the files for any number of files.
else:
values=[]
for k in temp:
values.append([kk[1] for kk in k])
mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
temp=[]
for j in mirnas:
temp.append(j[i])
count=0
for k in temp:
if k[1]==0:
count+=1
median=np.median([ii[1] for ii in temp])
if count<len(mirnas)/4.0 and median>.5:
for index, kk in enumerate(temp):
final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-10 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','BLCA','final_mirnas.txt'),'w')
for i in final_mirnas:
f.write(str(i))
f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
kaplan=[]
mirnas.append(final_mirnas[0][i][0])
for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
ro.globalenv['expression']=ro.FloatVector(data)
res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
inverse_norm=list(res) ## Convert robject to python list
## Prepare the variables for rpy2
ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
##Low Grade
lowgrade=[]
for ii in kaplan:
if ii[2]==0:
lowgrade.append(1)
else:
lowgrade.append(0)
##High Grade
highgrade=[]
for ii in kaplan:
if ii[2]==1:
highgrade.append(1)
else:
highgrade.append(0)
ro.globalenv['lowgrade']=ro.IntVector(lowgrade)
ro.globalenv['highgrade']=ro.IntVector(highgrade)
ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
res=ro.r('coxph(Surv(times,died) ~ mirna + lowgrade + highgrade + sex + age)') ## Perform Cox regression
## Parse the string of the result with python for the mirna coefficient and pvalue
for entry in str(res).split('\n'):
try:
if entry.split()[0]=='mirna':
coeff=entry.split()[1]
pvalue=entry.split()[-1]
break
except:
pass
coeffs.append(coeff)
pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','BLCA','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
f.write(i)
f.write('\t')
f.write(j)
f.write('\t')
f.write(k)
f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/BLCA/cox_regression.py",
"copies": "1",
"size": "11660",
"license": "mit",
"hash": -3303342178257248000,
"line_mean": 35.3239875389,
"line_max": 142,
"alpha_frac": 0.6439108062,
"autogenerated": false,
"ratio": 3.0603674540682415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4204278260268241,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in BRCA Tier 3 data downloaded Jan. 6th 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were three clinical files with nonredundant data. V4.0 is in general the most uptodate, but it is possible
## for data in the other files to be more uptodate. As a result, clinical data will be merged.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical1=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.1_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
    if clinical2[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical2=clinical2[1:]
##merging the data
new_clinical=[]
for i in clinical2:
    if i[0] not in [j[0] for j in clinical1]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical1[[j[0] for j in clinical1].index(i[0])][1]:
            new_clinical.append(clinical1[[j[0] for j in clinical1].index(i[0])])
        else:
            new_clinical.append(i)
for i in clinical1:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v1.5_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical3=[['','','']]
for i in data:
    if clinical3[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical3=clinical3[1:]
##merging the data
newer_clinical=[]
for i in clinical3:
    if i[0] not in [j[0] for j in new_clinical]:
        newer_clinical.append(i)
    else:
        if i[1]<=new_clinical[[j[0] for j in new_clinical].index(i[0])][1]:
            newer_clinical.append(new_clinical[[j[0] for j in new_clinical].index(i[0])])
        else:
            newer_clinical.append(i)
for i in new_clinical:
    if i[0] not in [j[0] for j in newer_clinical]:
        newer_clinical.append(i)
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['Infiltrating Ductal Carcinoma']=1
grade_dict['Metaplastic Carcinoma']=3
grade_dict['Mucinous Carcinoma']=4
grade_dict['Medullary Carcinoma']=5
grade_dict['Infiltrating Lobular Carcinoma']=6
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_patient_brca.txt'))
columns=f.readline().split('\t')
grade_column=columns.index('histological_type')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    ## The bare except is intentional: it drops patients whose histological type,
    ## sex, or age field is missing or non-numeric (KeyError/ValueError below).
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        pass
newest_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in newer_clinical]:
        newest_clinical.append(i)
    else:
        if i[1]<=newer_clinical[[j[0] for j in newer_clinical].index(i[0])][1]:
            newest_clinical.append(newer_clinical[[j[0] for j in newer_clinical].index(i[0])])
        else:
            newest_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in newer_clinical:
    if i[0] not in [j[0] for j in newest_clinical]:
        newest_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in newest_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' instead of the deprecated dict.has_key()
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','mirna',j.split('.txt')[0]+'new.txt'))
        ## 'line' (not 'i') so the comprehension does not clobber the outer loop variable
        mirna_dict={mirna:counts for mirna,counts in [[line.split()[0],float(line.strip().split()[-1])] for line in f]}
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## Note: the shared empty-list aliasing from [[]]*n is harmless here because each
## slot is rebound with + below, never mutated in place.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','BRCA','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    ## Reset per miRNA so a failed parse of the R output below is reported as NA
    ## instead of silently reusing the previous miRNA's coefficient and pvalue
    ## (or raising a NameError on the first iteration).
    coeff='NA'
    pvalue='NA'
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## One indicator variable per histological type (dummy coding).
    ##ductal
    ductal=[]
    for ii in kaplan:
        if ii[2]==1:
            ductal.append(1)
        else:
            ductal.append(0)
    ##metaplastic
    metaplastic=[]
    for ii in kaplan:
        if ii[2]==3:
            metaplastic.append(1)
        else:
            metaplastic.append(0)
    ##mucinous
    mucinous=[]
    for ii in kaplan:
        if ii[2]==4:
            mucinous.append(1)
        else:
            mucinous.append(0)
    ##medullary
    medullary=[]
    for ii in kaplan:
        if ii[2]==5:
            medullary.append(1)
        else:
            medullary.append(0)
    ##lobular
    lobular=[]
    for ii in kaplan:
        if ii[2]==6:
            lobular.append(1)
        else:
            lobular.append(0)
    ro.globalenv['ductal']=ro.IntVector(ductal)
    ro.globalenv['metaplastic']=ro.IntVector(metaplastic)
    ro.globalenv['mucinous']=ro.IntVector(mucinous)
    ro.globalenv['medullary']=ro.IntVector(medullary)
    ro.globalenv['lobular']=ro.IntVector(lobular)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + ductal + metaplastic + mucinous + medullary + lobular + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the mirna coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError: ## blank lines in the R output have no fields
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','BRCA','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/BRCA/cox_regression.py",
"copies": "1",
"size": "14465",
"license": "mit",
"hash": 2730063554618802000,
"line_mean": 33.3586698337,
"line_max": 142,
"alpha_frac": 0.6398202558,
"autogenerated": false,
"ratio": 2.9738898026315788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41137100584315783,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in CESC Tier 3 data downloaded Jan 6th 2016
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were two clinical files with nonredundant data. V4.0 was found to be most up to date.
## Both files are loaded with the more up to date file getting preference
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_cesc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## Note: unlike some of the sibling scripts, this one checks the death column first.
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_cesc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
    ## v4.0 patients get preference, so only load patients not already seen
    if i[patient_column] not in [j[0] for j in clinical]:
        if clinical2[-1][0]==i[patient_column]:
            if re.search('^[0-9]+$',i[death_column]):
                clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
            elif re.search('^[0-9]+$',i[alive_column]):
                clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
            else:
                pass
        else:
            if re.search('^[0-9]+$',i[death_column]):
                clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
            elif re.search('^[0-9]+$',i[alive_column]):
                clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
            else:
                pass
##merging data and removing the empty value
clinical+=clinical2[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
clinical4=[]
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_patient_cesc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
for i in data:
    ## The bare except is intentional: it drops patients whose grade, sex, or age
    ## field is missing or non-numeric (KeyError/ValueError from the lookups below).
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' instead of the deprecated dict.has_key()
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','CESC','mirna',j.split('.txt')[0]+'new.txt'))
        ## 'line' (not 'i') so the comprehension does not clobber the outer loop variable
        mirna_dict={mirna:counts for mirna,counts in [[line.split()[0],float(line.strip().split()[-1])] for line in f]}
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## Note: the shared empty-list aliasing from [[]]*n is harmless here because each
## slot is rebound with + below, never mutated in place.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
#### This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','CESC','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    ## Reset per miRNA so a failed parse of the R output below is reported as NA
    ## instead of silently reusing the previous miRNA's coefficient and pvalue
    ## (or raising a NameError on the first iteration).
    coeff='NA'
    pvalue='NA'
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ##grade4
    grade4=[]
    for ii in kaplan:
        if ii[2]==4:
            grade4.append(1)
        else:
            grade4.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    ## NOTE(review): 'sex' is set above but deliberately left out of the model below --
    ## CESC (cervical cancer) patients are essentially all female, so the covariate
    ## would be constant. TODO confirm this was intended.
    res=ro.r('coxph(Surv(times,died) ~ mirna + grade1 + grade2 + grade3 + grade4 + age)') ## Perform Cox regression
    # Parse the string of the result with python for the mirna coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError: ## blank lines in the R output have no fields
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','CESC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/CESC/cox_regression.py",
"copies": "1",
"size": "12060",
"license": "mit",
"hash": -5372441177180242000,
"line_mean": 34.366568915,
"line_max": 142,
"alpha_frac": 0.6389718076,
"autogenerated": false,
"ratio": 3.0477634571645185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41867352647645184,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in COAD Tier 3 data downloaded Jan. 6th, 2016
## NOTE(review): this is Python 2 code (dict.has_key below; zip is relied on to return a list).
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_coad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two secondary header rows of the TCGA clinical file.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## NOTE(review): a numeric death date takes precedence over a last-contact date in this script.
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','clinical','nationwidechildrens.org_clinical_patient_coad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        ## The leading 0 is a placeholder; the sibling scripts for graded cancers store tumor grade in this slot.
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        ## Bare except: intentionally drops patients with missing/unparseable sex or age
        ## (KeyError/ValueError). NOTE(review): it would also hide unrelated errors.
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        ## Keep whichever record carries the longer follow-up time.
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'hg19.isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the 3-field patient barcode (e.g. TCGA-XX-YYYY) from the sample barcode.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key is Python 2 only (would be "i[0] in TCGA_to_mirna" under Python 3).
    if TCGA_to_mirna.has_key(i[0]):
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','COAD','mirna',j.split('.txt')[0]+'new.txt'))
        ## Missing mirnas default to a count of 0 so all patients share the same mirna ordering.
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## zip returns a list under Python 2; the [name, value] pairs are indexed later (j[i][1]).
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
#### Only want mirnas that meet an expression cutoff
#### A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## The repeated [] all alias one list, but entries are rebound below (not mutated), so this is safe.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    ## temp collects the i-th mirna's [name, value] pair across every patient.
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','COAD','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the mirna coefficient and pvalue
    ## NOTE(review): if no 'mirna' row were matched, coeff/pvalue would be stale from the
    ## previous iteration (or undefined on the first) — confirm coxph output always has the row.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            ## Lines with no whitespace-separated fields raise IndexError and are skipped.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Write the Cox results out as a tab-delimited file: mirna name, coefficient, pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','COAD','coeffs_pvalues.txt'),'w')
for name,coefficient,pval in zip(mirnas,coeffs,pvalues):
    ## One row per mirna; all three values are already strings.
    f.write(name+'\t'+coefficient+'\t'+pval+'\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/COAD/cox_regression.py",
"copies": "1",
"size": "9643",
"license": "mit",
"hash": 8954970475131569000,
"line_mean": 36.2316602317,
"line_max": 142,
"alpha_frac": 0.6564347195,
"autogenerated": false,
"ratio": 3.0257295262001884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41821642457001884,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in ESCA Tier 3 data downloaded Jan 6th 2016
## NOTE(review): this is Python 2 code (dict.has_key below; zip is relied on to return a list).
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were two clinical files with nonredundant data. V4.0 was found to be most up to date.
## Both files are loaded with the more up to date file getting preference
f=open(os.path.join(BASE_DIR,'tcga_data','ESCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_esca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two secondary header rows of the TCGA clinical file.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## NOTE(review): here a numeric last-contact date takes precedence over a death date — the
## opposite check order from the sibling COAD/GBM scripts; confirm this is intended.
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
clinical4=[]
f=open(os.path.join(BASE_DIR,'tcga_data','ESCA','clinical','nationwidechildrens.org_clinical_patient_esca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
for i in data:
    try:
        ## grade_dict only maps G1-G3: patients with any other grade value (e.g. GX) raise
        ## KeyError and are dropped by the except below — presumably intentional.
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        ## Bare except: intentionally drops patients with missing/unparseable grade, sex, or age.
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        ## Keep whichever record carries the longer follow-up time.
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mirna files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','ESCA','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the 3-field patient barcode (e.g. TCGA-XX-YYYY) from the sample barcode.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key is Python 2 only (would be "i[0] in TCGA_to_mirna" under Python 3).
    if TCGA_to_mirna.has_key(i[0]):
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','ESCA','mirna',j.split('.txt')[0]+'new.txt'))
        ## Missing mirnas default to a count of 0 so all patients share the same mirna ordering.
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## zip returns a list under Python 2; the [name, value] pairs are indexed later (j[i][1]).
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## The repeated [] all alias one list, but entries are rebound below (not mutated), so this is safe.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    ## temp collects the i-th mirna's [name, value] pair across every patient.
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','ESCA','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Build 0/1 dummy indicators for each tumor grade.
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ## NOTE(review): every patient has grade in {1,2,3}, so grade1+grade2+grade3==1 for all rows;
    ## the three dummies are collinear and R will drop/NA one of them — confirm intended.
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
    # Parse the string of the result with python for the mirna coefficient and pvalue
    ## NOTE(review): if no 'mirna' row were matched, coeff/pvalue would be stale from the
    ## previous iteration (or undefined on the first) — confirm coxph output always has the row.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            ## Lines with no whitespace-separated fields raise IndexError and are skipped.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Write the Cox results out as a tab-delimited file: mirna name, coefficient, pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','ESCA','coeffs_pvalues.txt'),'w')
for name,coefficient,pval in zip(mirnas,coeffs,pvalues):
    ## One row per mirna; all three values are already strings.
    f.write(name+'\t'+coefficient+'\t'+pval+'\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/ESCA/cox_regression.py",
"copies": "1",
"size": "10583",
"license": "mit",
"hash": -4095795354324827600,
"line_mean": 34.8745762712,
"line_max": 142,
"alpha_frac": 0.6485873571,
"autogenerated": false,
"ratio": 3.0437158469945356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9067101626070919,
"avg_score": 0.025040315604723452,
"num_lines": 295
} |
## A script for finding every cox coefficient and pvalue for every miRNA in GBM Tier 3 data downloaded Jan. 6th, 2016
## NOTE(review): this is Python 2 code (dict.has_key below; zip is relied on to return a list).
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_gbm.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two secondary header rows of the TCGA clinical file.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## NOTE(review): a numeric death date takes precedence over a last-contact date in this script.
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','clinical','nationwidechildrens.org_clinical_patient_gbm.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        ## The leading 0 is a placeholder; the sibling scripts for graded cancers store tumor grade in this slot.
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        ## Bare except: intentionally drops patients with missing/unparseable sex or age
        ## (KeyError/ValueError). NOTE(review): it would also hide unrelated errors.
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        ## Keep whichever record carries the longer follow-up time.
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
##only microarray data was available for GBM
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ## The normalized data files are used
    if i[1].split('-')[3][:-1]=='01':
        ## Rebuild the 3-field patient barcode (e.g. TCGA-XX-YYYY) from the sample barcode.
        x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
        TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key is Python 2 only (would be "i[0] in TCGA_to_mirna" under Python 3).
    if TCGA_to_mirna.has_key(i[0]):
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of mirnas is constructed, the order of mirna lists is same as the clinical_and_files data
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','GBM','mirna',j))
        ## Skip the two header rows of the microarray file.
        f.readline()
        f.readline()
        temp.append([[i.split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## zip returns a list under Python 2; the [name, value] pairs are indexed later (j[i][1]).
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of 1 microarray unit and no more than a fourth of the patients containing no expression was chosen
## Basically all mirnas are taken for this cancer
## The repeated [] all alias one list, but entries are rebound below (not mutated), so this is safe.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    ## temp collects the i-th mirna's [name, value] pair across every patient.
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file several MBs which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','GBM','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['mirna']=ro.FloatVector(data) ##no inverse normal transformation for this cancer
    ## Prepare the variables for rpy2
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the mirna coefficient and pvalue
    ## NOTE(review): if no 'mirna' row were matched, coeff/pvalue would be stale from the
    ## previous iteration (or undefined on the first) — confirm coxph output always has the row.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            ## Lines with no whitespace-separated fields raise IndexError and are skipped.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Write the Cox results out as a tab-delimited file: mirna name, coefficient, pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','GBM','coeffs_pvalues.txt'),'w')
for name,coefficient,pval in zip(mirnas,coeffs,pvalues):
    ## One row per mirna; all three values are already strings.
    f.write(name+'\t'+coefficient+'\t'+pval+'\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/GBM/cox_regression.py",
"copies": "1",
"size": "9135",
"license": "mit",
"hash": -1769685292696447000,
"line_mean": 34.5447470817,
"line_max": 134,
"alpha_frac": 0.6556102901,
"autogenerated": false,
"ratio": 3.0592766242464835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9099066748958149,
"avg_score": 0.023164033077666808,
"num_lines": 257
} |
## A script for finding every cox coefficient and pvalue for every miRNA in HNSC Tier 3 data downloaded Jan 6th, 2016
## NOTE(review): this is Python 2 code, consistent with the sibling cox_regression.py scripts.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were two clinical files with nonredundant data. V4.8 was found to be most up to date.
## Both files are loaded with the more up to date file getting preference.
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','clinical','nationwidechildrens.org_clinical_follow_up_v4.8_hnsc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two secondary header rows of the TCGA clinical file.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## NOTE(review): a numeric death date takes precedence over a last-contact date in this script.
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_hnsc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
if i[patient_column] not in [j[0] for j in clinical]:
if clinical2[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[death_column]):
clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
elif re.search('^[0-9]+$',i[alive_column]):
clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
else:
pass
else:
if re.search('^[0-9]+$',i[death_column]):
clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
##merging data and removing the empty value
clinical+=clinical2[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','clinical','nationwidechildrens.org_clinical_patient_hnsc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
try:
more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mirna files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
##normalized files were used
if 'isoform.quantification' in i[0]:
if i[1].split('-')[3][:-1]=='01':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
if TCGA_to_mirna.has_key(i[0]):
## The miRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
else:
pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
temp=[]
for j in i[-1]:
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','mirna',j.split('.txt')[0]+'new.txt'))
mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
## In the case that the patient only contained 1 primary tumor miRNA file.
if len(temp)==1:
mirnas.append(temp[0])
## If the patient contained more than 1 primary tumor miRNA file
## this list comprehension will average the files for any number of files.
else:
values=[]
for k in temp:
values.append([kk[1] for kk in k])
mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
temp=[]
for j in mirnas:
temp.append(j[i])
count=0
for k in temp:
if k[1]==0:
count+=1
median=np.median([ii[1] for ii in temp])
if count<len(mirnas)/4.0 and median>.5:
for index, kk in enumerate(temp):
final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','HNSC','final_mirnas.txt'),'w')
for i in final_mirnas:
f.write(str(i))
f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
kaplan=[]
mirnas.append(final_mirnas[0][i][0])
for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
ro.globalenv['expression']=ro.FloatVector(data)
res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
inverse_norm=list(res) ## Convert robject to python list
## Prepare the variables for rpy2
ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
##grade1
grade1=[]
for ii in kaplan:
if ii[2]==1:
grade1.append(1)
else:
grade1.append(0)
##grade2
grade2=[]
for ii in kaplan:
if ii[2]==2:
grade2.append(1)
else:
grade2.append(0)
##grade3
grade3=[]
for ii in kaplan:
if ii[2]==3:
grade3.append(1)
else:
grade3.append(0)
##grade4
grade4=[]
for ii in kaplan:
if ii[2]==4:
grade4.append(1)
else:
grade4.append(0)
ro.globalenv['grade1']=ro.IntVector(grade1)
ro.globalenv['grade2']=ro.IntVector(grade2)
ro.globalenv['grade3']=ro.IntVector(grade3)
ro.globalenv['grade4']=ro.IntVector(grade4)
ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
res=ro.r('coxph(Surv(times,died) ~ mirna + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
# Parse the string of the result with python for the mirna coefficient and pvalue
for entry in str(res).split('\n'):
try:
if entry.split()[0]=='mirna':
coeff=entry.split()[1]
pvalue=entry.split()[-1]
break
except:
pass
coeffs.append(coeff)
pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','HNSC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
f.write(i)
f.write('\t')
f.write(j)
f.write('\t')
f.write(k)
f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/HNSC/cox_regression.py",
"copies": "1",
"size": "12092",
"license": "mit",
"hash": -1552545316746449000,
"line_mean": 34.9880952381,
"line_max": 142,
"alpha_frac": 0.6389348329,
"autogenerated": false,
"ratio": 3.0420125786163523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4180947411516352,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in KIRC Tier 3 data downloaded Jan. 6th, 2016
## NOTE(review): this is a Python 2 script (dict.has_key, zip returning a list);
## it will not run unmodified under Python 3.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_kirc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two TCGA header-description rows that follow the column-name row.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## Same patient as the previous row: overwrite in place; death takes
    ## precedence over last-contact when both fields are numeric.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_patient_kirc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## The bare except deliberately skips patients with unparseable grade, sex,
    ## or age (e.g. '[Not Available]') -- only complete records are kept.
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        ## Keep whichever record has the longer follow-up time.
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mirna files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the 3-part patient barcode (e.g. TCGA-XX-YYYY) from the sample barcode.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    if TCGA_to_mirna.has_key(i[0]):
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','mirna',j.split('.txt')[0]+'new.txt'))
        ## Map miRNA name -> read count; missing miRNAs default to 0 below.
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## NOTE: [[]]*n aliases one empty list n times; safe here only because entries
## are rebound below rather than mutated in place.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','KIRC','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## One-hot indicator variables for each tumor grade (kaplan[ii][2] is grade 1-4).
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ##grade4
    grade4=[]
    for ii in kaplan:
        if ii[2]==4:
            grade4.append(1)
        else:
            grade4.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
    # Parse the string of the result with python for the mirna coefficient and pvalue
    ## NOTE(review): if no 'mirna' row is found, coeff/pvalue silently retain
    ## their values from the previous iteration (or are undefined on the first).
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','KIRC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/KIRC/cox_regression.py",
"copies": "1",
"size": "10658",
"license": "mit",
"hash": 2355818516007938000,
"line_mean": 34.7651006711,
"line_max": 142,
"alpha_frac": 0.6443047476,
"autogenerated": false,
"ratio": 3.024404086265607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41687088338656075,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in KIRP Tier 3 data downloaded Jan. 6th, 2016
## NOTE(review): this is a Python 2 script (dict.has_key, zip returning a list);
## it will not run unmodified under Python 3. Unlike the graded cancers, KIRP
## uses no tumor-grade covariates, so the Cox model is mirna + sex + age only.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','KIRP','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_kirp.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two TCGA header-description rows that follow the column-name row.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## NOTE(review): unlike the HNSC/KIRC scripts, this one checks the
    ## last-contact field BEFORE the death field, so Alive takes precedence
    ## when both are numeric -- confirm this ordering is intentional.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','KIRP','clinical','nationwidechildrens.org_clinical_patient_kirp.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    ## The leading 0 is a placeholder where graded cancers store tumor grade,
    ## keeping the record layout consistent across scripts.
    ## The bare except deliberately skips patients with unparseable sex or age.
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        ## Keep whichever record has the longer follow-up time.
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##There were some duplicate file names, but different data. I assume these are technical replicates. Those
##files were merged together.
f=open(os.path.join(BASE_DIR,'tcga_data','KIRP','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the 3-part patient barcode (e.g. TCGA-XX-YYYY) from the sample barcode.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    if TCGA_to_mirna.has_key(i[0]):
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','KIRP','mirna',j.split('.txt')[0]+'new.txt'))
        ## Map miRNA name -> read count; missing miRNAs default to 0 below.
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## NOTE: [[]]*n aliases one empty list n times; safe here only because entries
## are rebound below rather than mutated in place.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','KIRP','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the mirna coefficient and pvalue
    ## NOTE(review): if no 'mirna' row is found, coeff/pvalue silently retain
    ## their values from the previous iteration (or are undefined on the first).
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','KIRP','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/KIRP/cox_regression.py",
"copies": "1",
"size": "9616",
"license": "mit",
"hash": 5556062649598266000,
"line_mean": 37.1587301587,
"line_max": 142,
"alpha_frac": 0.6566139767,
"autogenerated": false,
"ratio": 3.015365318281593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9050498201795854,
"avg_score": 0.024296218637148044,
"num_lines": 252
} |
## A script for finding every cox coefficient and pvalue for every miRNA in LAML Tier 3 data downloaded Jan. 6th, 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
##LAML did not contain a follow_up file, which might explain why cbioportal didn't allow kaplans for this cancer for a period of time.
##However there was a lot of clinical data in the clinical_patient file
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
clinical4=[]
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
f=open(os.path.join(BASE_DIR,'tcga_data','LAML','clinical','nationwidechildrens.org_clinical_patient_laml.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## Rows with a missing/unrecognized sex or a non-numeric age are skipped wholesale
## by the bare except below (deliberate best-effort parsing of the TCGA file).
for i in data:
    try:
        ## [0, sex, age]: the leading 0 is a placeholder in the slot where the
        ## other cancer scripts store tumor grade (LAML has no grade).
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        pass
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in clinical4 if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LAML','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 03 (primary blood derived cancer) is selected here instead of the usual 01
#### primary tumor code, because LAML is a leukemia with no solid tumor samples.
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'hg19.isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='03':
            ## Patient barcode = first three '-'-separated fields of the sample barcode
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mirna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' replaces dict.has_key(), which only exists in Python 2
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','LAML','mirna',j.split('.txt')[0]+'new.txt'))
        ## NOTE: the inner comprehension reuses the name i for file lines; this appears
        ## safe only because it runs inside the dict comprehension's own scope (Python 2.7),
        ## so the outer patient-loop variable i is not clobbered -- confirm before porting.
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        ## miRNAs absent from the file are recorded with expression 0
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## Use independent inner lists: the original [[]]*len(mirnas) created N references to
## one shared list, which only worked because entries were rebound, never mutated.
final_mirnas=[[] for _ in range(len(mirnas))]
for i in range(len(mirnas[0])):
    ## Collect the i-th miRNA's [name, value] pair across all patients
    temp=[j[i] for j in mirnas]
    count=sum(1 for k in temp if k[1]==0)
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            ## append is O(1); the original list concatenation rebuilt each list
            ## on every pass (accidentally quadratic)
            final_mirnas[index].append(kk)
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
## One line per patient: the str() of that patient's list of [mirna, expression] pairs that passed the cutoff.
f=open(os.path.join(BASE_DIR,'mirna','cox','LAML','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + sex + age)') ## Perform Cox regression
    ## Parse the printed coxph summary for the mirna coefficient and pvalue.
    ## Splitting each line once and checking its length replaces the original
    ## try/bare-except, which could also hide unrelated errors.
    ## NOTE(review): if no 'mirna' row is found, coeff/pvalue silently keep the
    ## values from the previous miRNA (original behavior) -- confirm the coxph
    ## printout always contains the row.
    for entry in str(res).split('\n'):
        fields=entry.split()
        if len(fields)>1 and fields[0]=='mirna':
            coeff=fields[1]
            pvalue=fields[-1]
            break
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','LAML','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/LAML/cox_regression.py",
"copies": "1",
"size": "7430",
"license": "mit",
"hash": -772812124370821000,
"line_mean": 36.15,
"line_max": 142,
"alpha_frac": 0.6596231494,
"autogenerated": false,
"ratio": 3.005663430420712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9034890632719332,
"avg_score": 0.02607918942027581,
"num_lines": 200
} |
## A script for finding every cox coefficient and pvalue for every miRNA in LGG Tier 3 data downloaded Jan. 6th, 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lgg.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## NOTE(review): this script checks the death column before the last-contact column,
## while the LIHC script in this collection checks them in the opposite order --
## confirm which precedence is intended when a row has both values.
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','clinical','nationwidechildrens.org_clinical_patient_lgg.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
## Patients with a grade other than G2/G3, an unrecognized sex, or a non-numeric age
## are skipped wholesale by the bare except below (deliberate best-effort parsing).
for i in data:
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## For patients present in both sources, the record with the longer follow-up time wins.
## (Rebuilding [j[0] for j in clinical] inside the loop is O(n^2), but n is small here.)
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
final_clinical=[]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Patient barcode = first three '-'-separated fields of the sample barcode
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mirna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' replaces dict.has_key(), which only exists in Python 2
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','LGG','mirna',j.split('.txt')[0]+'new.txt'))
        ## NOTE: the inner comprehension reuses the name i for file lines; this appears
        ## safe only because it runs inside the dict comprehension's own scope (Python 2.7),
        ## so the outer patient-loop variable i is not clobbered -- confirm before porting.
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        ## miRNAs absent from the file are recorded with expression 0
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## Use independent inner lists: the original [[]]*len(mirnas) created N references to
## one shared list, which only worked because entries were rebound, never mutated.
final_mirnas=[[] for _ in range(len(mirnas))]
for i in range(len(mirnas[0])):
    ## Collect the i-th miRNA's [name, value] pair across all patients
    temp=[j[i] for j in mirnas]
    count=sum(1 for k in temp if k[1]==0)
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            ## append is O(1); the original list concatenation rebuilt each list
            ## on every pass (accidentally quadratic)
            final_mirnas[index].append(kk)
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
## One line per patient: the str() of that patient's list of [mirna, expression] pairs that passed the cutoff.
f=open(os.path.join(BASE_DIR,'mirna','cox','LGG','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Indicator (dummy) variables for tumor grade; ii[2] is the grade slot.
    ## NOTE(review): grade2 and grade3 are complementary (every included patient is
    ## G2 or G3), so the two dummies are redundant in the model and coxph will
    ## report one of them as NA -- confirm this is intended.
    grade2=[1 if ii[2]==2 else 0 for ii in kaplan]
    grade3=[1 if ii[2]==3 else 0 for ii in kaplan]
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + grade2 + grade3 + sex + age)') ## Perform Cox regression
    ## Parse the printed coxph summary for the mirna coefficient and pvalue.
    ## Splitting each line once and checking its length replaces the original
    ## try/bare-except, which could also hide unrelated errors.
    ## NOTE(review): if no 'mirna' row is found, coeff/pvalue silently keep the
    ## values from the previous miRNA (original behavior) -- confirm the coxph
    ## printout always contains the row.
    for entry in str(res).split('\n'):
        fields=entry.split()
        if len(fields)>1 and fields[0]=='mirna':
            coeff=fields[1]
            pvalue=fields[-1]
            break
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','LGG','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/LGG/cox_regression.py",
"copies": "1",
"size": "10226",
"license": "mit",
"hash": -1702310522581083600,
"line_mean": 35.5214285714,
"line_max": 142,
"alpha_frac": 0.6501075689,
"autogenerated": false,
"ratio": 3.030823947836396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9058479454512889,
"avg_score": 0.024490412444701428,
"num_lines": 280
} |
## A script for finding every cox coefficient and pvalue for every miRNA in LIHC Tier 3 data downloaded Jan. 6th, 2016
##load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_lihc.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## NOTE(review): this script checks the last-contact column before the death column,
## while the LGG script in this collection checks them in the opposite order --
## confirm which precedence is intended when a row has both values.
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','clinical','nationwidechildrens.org_clinical_patient_lihc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
## Patients with a grade other than G1-G4, an unrecognized sex, or a non-numeric age
## are skipped wholesale by the bare except below (deliberate best-effort parsing).
for i in data:
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## For patients present in both sources, the record with the longer follow-up time wins.
## (Rebuilding [j[0] for j in clinical] inside the loop is O(n^2), but n is small here.)
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Patient barcode = first three '-'-separated fields of the sample barcode
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mirna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' replaces dict.has_key(), which only exists in Python 2
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','mirna',j.split('.txt')[0]+'new.txt'))
        ## NOTE: the inner comprehension reuses the name i for file lines; this appears
        ## safe only because it runs inside the dict comprehension's own scope (Python 2.7),
        ## so the outer patient-loop variable i is not clobbered -- confirm before porting.
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        ## miRNAs absent from the file are recorded with expression 0
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## Use independent inner lists: the original [[]]*len(mirnas) created N references to
## one shared list, which only worked because entries were rebound, never mutated.
final_mirnas=[[] for _ in range(len(mirnas))]
for i in range(len(mirnas[0])):
    ## Collect the i-th miRNA's [name, value] pair across all patients
    temp=[j[i] for j in mirnas]
    count=sum(1 for k in temp if k[1]==0)
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            ## append is O(1); the original list concatenation rebuilt each list
            ## on every pass (accidentally quadratic)
            final_mirnas[index].append(kk)
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
## One line per patient: the str() of that patient's list of [mirna, expression] pairs that passed the cutoff.
f=open(os.path.join(BASE_DIR,'mirna','cox','LIHC','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Indicator (dummy) variables for tumor grade; ii[2] is the grade slot.
    ## NOTE(review): the four dummies always sum to 1 (every included patient is
    ## G1-G4), so one of them is redundant in the model and coxph will report it
    ## as NA -- confirm this is intended.
    grade1=[1 if ii[2]==1 else 0 for ii in kaplan]
    grade2=[1 if ii[2]==2 else 0 for ii in kaplan]
    grade3=[1 if ii[2]==3 else 0 for ii in kaplan]
    grade4=[1 if ii[2]==4 else 0 for ii in kaplan]
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
    ## Parse the printed coxph summary for the mirna coefficient and pvalue.
    ## Splitting each line once and checking its length replaces the original
    ## try/bare-except, which could also hide unrelated errors.
    ## NOTE(review): if no 'mirna' row is found, coeff/pvalue silently keep the
    ## values from the previous miRNA (original behavior) -- confirm the coxph
    ## printout always contains the row.
    for entry in str(res).split('\n'):
        fields=entry.split()
        if len(fields)>1 and fields[0]=='mirna':
            coeff=fields[1]
            pvalue=fields[-1]
            break
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','LIHC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/LIHC/cox_regression.py",
"copies": "1",
"size": "10678",
"license": "mit",
"hash": 6980963000249970000,
"line_mean": 34.0098360656,
"line_max": 142,
"alpha_frac": 0.6434725604,
"autogenerated": false,
"ratio": 3.0352472996020468,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9055132514057029,
"avg_score": 0.024717469189003548,
"num_lines": 305
} |
## A script for finding every cox coefficient and pvalue for every miRNA in LUAD Tier 3 data downloaded Jan. 6th, 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_luad.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
f.close()
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','clinical','nationwidechildrens.org_clinical_patient_luad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
f.close()
for i in data:
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    ## Rows with a missing/malformed sex or age field are skipped on purpose.
    ## Only the errors those fields can raise are caught so real bugs still surface.
    except (KeyError,ValueError,IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
f.close()
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' instead of the removed dict.has_key so the script also runs under Python 3
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
f.close()
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','mirna',j.split('.txt')[0]+'new.txt'))
        ## 'line' (not 'i') is used in the inner comprehension: Python 2 list
        ## comprehensions leak their loop variable and would clobber the
        ## enclosing patient record 'i'.
        mirna_dict={mirna:counts for mirna,counts in [[line.split()[0],float(line.strip().split()[-1])] for line in f]}
        f.close()
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## zip returns a list under Python 2; later code indexes into these rows.
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## Note: [[]]*n makes every slot alias the SAME empty list; this is safe only
## because the loop below rebinds each slot with '+' instead of mutating in place.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','LUAD','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the mirna coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        ## blank lines in the R output make entry.split() empty
        except IndexError:
            pass
    ## NOTE(review): if no 'mirna' row is found, coeff/pvalue silently carry over
    ## from the previous iteration -- confirm the R output always contains the row.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','LUAD','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/LUAD/cox_regression.py",
"copies": "1",
"size": "9666",
"license": "mit",
"hash": -6026977238778096000,
"line_mean": 36.9058823529,
"line_max": 142,
"alpha_frac": 0.6566314918,
"autogenerated": false,
"ratio": 3.029144468818552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4185775960618552,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in LUSC Tier 3 data downloaded Jan. 6th, 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lusc.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
f.close()
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_patient_lusc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
f.close()
for i in data:
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    ## Rows with a missing/malformed sex or age field are skipped on purpose.
    ## Only the errors those fields can raise are caught so real bugs still surface.
    except (KeyError,ValueError,IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
f.close()
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' instead of the removed dict.has_key so the script also runs under Python 3
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
f.close()
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','mirna',j.split('.txt')[0]+'new.txt'))
        ## 'line' (not 'i') is used in the inner comprehension: Python 2 list
        ## comprehensions leak their loop variable and would clobber the
        ## enclosing patient record 'i'.
        mirna_dict={mirna:counts for mirna,counts in [[line.split()[0],float(line.strip().split()[-1])] for line in f]}
        f.close()
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## zip returns a list under Python 2; later code indexes into these rows.
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## Note: [[]]*n makes every slot alias the SAME empty list; this is safe only
## because the loop below rebinds each slot with '+' instead of mutating in place.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','LUSC','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the mirna coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        ## blank lines in the R output make entry.split() empty
        except IndexError:
            pass
    ## NOTE(review): if no 'mirna' row is found, coeff/pvalue silently carry over
    ## from the previous iteration -- confirm the R output always contains the row.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','LUSC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/LUSC/cox_regression.py",
"copies": "1",
"size": "9660",
"license": "mit",
"hash": -3005834714614659600,
"line_mean": 36.5875486381,
"line_max": 142,
"alpha_frac": 0.6570393375,
"autogenerated": false,
"ratio": 3.020637898686679,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4177677236186679,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in OV Tier 3 data downloaded Jan. 6th, 2016
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Read the follow up data
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','OV','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_ov.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
f.close()
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','OV','clinical','nationwidechildrens.org_clinical_patient_ov.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
f.close()
clinical4=[]
for i in data:
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    ## Rows with a missing/malformed grade, sex, or age field are skipped on purpose.
    ## Only the errors those fields can raise are caught so real bugs still surface.
    except (KeyError,ValueError,IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','OV','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
f.close()
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'hg19.isoform.quantification' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' instead of the removed dict.has_key so the script also runs under Python 3
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
f.close()
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','OV','mirna',j.split('.txt')[0]+'new.txt'))
        ## 'line' (not 'i') is used in the inner comprehension: Python 2 list
        ## comprehensions leak their loop variable and would clobber the
        ## enclosing patient record 'i'.
        mirna_dict={mirna:counts for mirna,counts in [[line.split()[0],float(line.strip().split()[-1])] for line in f]}
        f.close()
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## zip returns a list under Python 2; later code indexes into these rows.
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## Note: [[]]*n makes every slot alias the SAME empty list; this is safe only
## because the loop below rebinds each slot with '+' instead of mutating in place.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','OV','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Tumor grade is encoded as separate indicator (dummy) variables for the model.
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the mirna coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        ## blank lines in the R output make entry.split() empty
        except IndexError:
            pass
    ## NOTE(review): if no 'mirna' row is found, coeff/pvalue silently carry over
    ## from the previous iteration -- confirm the R output always contains the row.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','OV','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/OV/cox_regression.py",
"copies": "1",
"size": "10377",
"license": "mit",
"hash": 7671918509012205000,
"line_mean": 34.2959183673,
"line_max": 142,
"alpha_frac": 0.6467187048,
"autogenerated": false,
"ratio": 3.038653001464129,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4185371706264129,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in PAAD Tier 3 data downloaded Jan. 6th, 2016
## NOTE(review): written for Python 2 (uses dict.has_key() further down and
## relies on zip() returning a list); run under a Python 2 interpreter.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## BASE_DIR is four directory levels above this file (the repository root).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','PAAD','clinical','nationwidechildrens.org_clinical_follow_up_v4.4_paad.txt'))
##get the column indexes needed
## NOTE(review): unlike the READ/SARC scripts, the header line is not strip()ed,
## so the last column name keeps its trailing newline -- harmless unless that
## column is ever looked up by name.
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two header/description rows that follow the column names.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## Rows for one patient are consecutive in the file, so comparing against
    ## the last appended entry is enough to detect a repeat listing.
    ## The regex accepts only whole non-negative integers (day counts).
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
##Note: three tier and four tier systems are mixed.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','PAAD','clinical','nationwidechildrens.org_clinical_patient_paad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## Best-effort row handling: any patient whose grade, sex, or age field is
    ## missing or non-numeric raises (KeyError/ValueError) and is skipped,
    ## rather than aborting the whole run.
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## ``clinical`` does not change inside this loop, so build the patient-id list
## once instead of rebuilding it on every iteration (the original was O(n^2)).
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        ## Keep whichever record reports the longer (more recent) follow-up time.
        match=clinical[clinical_ids.index(i[0])]
        if i[1]<=match[1]:
            new_clinical.append(match)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## Membership is tracked in a set, updated as we append, so a duplicate patient
## within ``clinical`` is still added at most once (as before) in O(1) per test.
merged_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','PAAD','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        ## Sample-type code is the 4th barcode field minus its trailing vial letter.
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the patient barcode (TCGA-XX-XXXX) from the first three fields.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## ``in`` replaces dict.has_key(), which is deprecated in Python 2 and was
    ## removed in Python 3; behaviour is identical.
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannotated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        ## '<file>new.txt' is the reannotated counterpart of each quantification file.
        f=open(os.path.join(BASE_DIR,'tcga_data','PAAD','mirna',j.split('.txt')[0]+'new.txt'))
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        ## Missing mirnas default to 0 so every patient vector has identical length and order.
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## NOTE(review): relies on Python 2 zip() returning a list -- the result
        ## is indexed later (j[i][1] in the Cox loop).
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## [[]]*n aliases ONE empty list n times; this is safe only because the loop
## below REBINDS each slot (final_mirnas[index]=final_mirnas[index]+[kk])
## instead of mutating the shared list in place.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','PAAD','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names (rebinds the expression matrix above, which is no longer needed)
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        ## Row layout: [time, vital status, grade, sex, age, expression]
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Indicator (dummy) coding of the mixed 3- and 4-tier grade field.
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ##grade4
    grade4=[]
    for ii in kaplan:
        if ii[2]==4:
            grade4.append(1)
        else:
            grade4.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the mirna coefficient and pvalue
    for entry in str(res).split('\n'):
        ## The row whose first token is 'mirna' carries that covariate; column 2
        ## is the coefficient and the last column the p-value. The bare except
        ## skips blank/short lines (IndexError on entry.split()[0]).
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    ## NOTE(review): if no 'mirna' row is ever matched, coeff/pvalue silently
    ## carry over their values from the previous iteration.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','PAAD','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/PAAD/cox_regression.py",
"copies": "1",
"size": "10704",
"license": "mit",
"hash": -3085435677635653000,
"line_mean": 35.0404040404,
"line_max": 142,
"alpha_frac": 0.6448056801,
"autogenerated": false,
"ratio": 3.033153867951261,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4177959548051261,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in READ Tier 3 data downloaded Jan. 6th, 2016
## Load necessary modules
## NOTE(review): written for Python 2 (uses dict.has_key() further down and
## relies on zip() returning a list); run under a Python 2 interpreter.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## BASE_DIR is four directory levels above this file (the repository root).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','READ','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_read.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two header/description rows that follow the column names.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## Rows for one patient are consecutive, so comparing against the last
    ## appended entry detects a repeat listing. Death is checked BEFORE last
    ## contact here, so a row carrying both values is recorded as 'Dead'.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','READ','clinical','nationwidechildrens.org_clinical_patient_read.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## Best-effort: patients with a missing/non-numeric sex or age field raise
    ## (KeyError/ValueError) and are skipped. The leading 0 is a placeholder in
    ## the slot where other onco_lnc cancer scripts store tumor grade, keeping
    ## list positions consistent across scripts.
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## ``clinical`` does not change inside this loop, so build the patient-id list
## once instead of rebuilding it on every iteration (the original was O(n^2)).
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        ## Keep whichever record reports the longer (more recent) follow-up time.
        match=clinical[clinical_ids.index(i[0])]
        if i[1]<=match[1]:
            new_clinical.append(match)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## Membership is tracked in a set, updated as we append, so a duplicate patient
## within ``clinical`` is still added at most once (as before) in O(1) per test.
merged_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','READ','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'hg19.isoform.quantification' in i[0]:
        ## Sample-type code is the 4th barcode field minus its trailing vial letter.
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the patient barcode (TCGA-XX-XXXX) from the first three fields.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## ``in`` replaces dict.has_key(), which is deprecated in Python 2 and was
    ## removed in Python 3; behaviour is identical.
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannotated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        ## '<file>new.txt' is the reannotated counterpart of each quantification file.
        f=open(os.path.join(BASE_DIR,'tcga_data','READ','mirna',j.split('.txt')[0]+'new.txt'))
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        ## Missing mirnas default to 0 so every patient vector has identical length and order.
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## NOTE(review): relies on Python 2 zip() returning a list -- the result
        ## is indexed later (j[i][1] in the Cox loop).
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## [[]]*n aliases ONE empty list n times; this is safe only because the loop
## below REBINDS each slot (final_mirnas[index]=final_mirnas[index]+[kk])
## instead of mutating the shared list in place.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','READ','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names (rebinds the expression matrix above, which is no longer needed)
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        ## Row layout: [time, vital status, grade placeholder (0), sex, age, expression]
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the mirna coefficient and pvalue
    for entry in str(res).split('\n'):
        ## The row whose first token is 'mirna' carries that covariate; column 2
        ## is the coefficient and the last column the p-value. The bare except
        ## skips blank/short lines (IndexError on entry.split()[0]).
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    ## NOTE(review): if no 'mirna' row is ever matched, coeff/pvalue silently
    ## carry over their values from the previous iteration.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','READ','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/READ/cox_regression.py",
"copies": "1",
"size": "9664",
"license": "mit",
"hash": 6353988321956900000,
"line_mean": 36.75,
"line_max": 142,
"alpha_frac": 0.6571812914,
"autogenerated": false,
"ratio": 3.0351758793969847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9074337104203714,
"avg_score": 0.02360401331865441,
"num_lines": 256
} |
## A script for finding every cox coefficient and pvalue for every miRNA in SARC Tier 3 data downloaded Jan. 6th, 2016
## Load necessary modules
## NOTE(review): written for Python 2 (uses dict.has_key() further down and
## relies on zip() returning a list); run under a Python 2 interpreter.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## BASE_DIR is four directory levels above this file (the repository root).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','SARC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_sarc.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two header/description rows that follow the column names.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## Rows for one patient are consecutive, so comparing against the last
    ## appended entry detects a repeat listing. Last contact is checked BEFORE
    ## death here (the READ script uses the opposite precedence).
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','SARC','clinical','nationwidechildrens.org_clinical_patient_sarc.txt'))
##get the column indexes needed
## NOTE(review): SARC names its age column 'age_at_diagnosis' (other cancers
## use 'age_at_initial_pathologic_diagnosis').
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## Best-effort: patients with a missing/non-numeric sex or age field raise
    ## (KeyError/ValueError) and are skipped. The leading 0 is a placeholder in
    ## the slot where other onco_lnc cancer scripts store tumor grade.
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## ``clinical`` does not change inside this loop, so build the patient-id list
## once instead of rebuilding it on every iteration (the original was O(n^2)).
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        ## Keep whichever record reports the longer (more recent) follow-up time.
        match=clinical[clinical_ids.index(i[0])]
        if i[1]<=match[1]:
            new_clinical.append(match)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## Membership is tracked in a set, updated as we append, so a duplicate patient
## within ``clinical`` is still added at most once (as before) in O(1) per test.
merged_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','SARC','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        ## Sample-type code is the 4th barcode field minus its trailing vial letter.
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the patient barcode (TCGA-XX-XXXX) from the first three fields.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## ``in`` replaces dict.has_key(), which is deprecated in Python 2 and was
    ## removed in Python 3; behaviour is identical.
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
    else:
        pass
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannotated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        ## '<file>new.txt' is the reannotated counterpart of each quantification file.
        f=open(os.path.join(BASE_DIR,'tcga_data','SARC','mirna',j.split('.txt')[0]+'new.txt'))
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        ## Missing mirnas default to 0 so every patient vector has identical length and order.
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## NOTE(review): relies on Python 2 zip() returning a list -- the result
        ## is indexed later (j[i][1] in the Cox loop).
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## [[]]*n aliases ONE empty list n times; this is safe only because the loop
## below REBINDS each slot (final_mirnas[index]=final_mirnas[index]+[kk])
## instead of mutating the shared list in place.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','SARC','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names (rebinds the expression matrix above, which is no longer needed)
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        ## Row layout: [time, vital status, grade placeholder (0), sex, age, expression]
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the mirna coefficient and pvalue
    for entry in str(res).split('\n'):
        ## The row whose first token is 'mirna' carries that covariate; column 2
        ## is the coefficient and the last column the p-value. The bare except
        ## skips blank/short lines (IndexError on entry.split()[0]).
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    ## NOTE(review): if no 'mirna' row is ever matched, coeff/pvalue silently
    ## carry over their values from the previous iteration.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','SARC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/SARC/cox_regression.py",
"copies": "1",
"size": "9640",
"license": "mit",
"hash": 5407489945504302000,
"line_mean": 36.65625,
"line_max": 142,
"alpha_frac": 0.6566390041,
"autogenerated": false,
"ratio": 3.027638190954774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9066128276464974,
"avg_score": 0.023629783717959964,
"num_lines": 256
} |
## A script for finding every cox coefficient and pvalue for every miRNA in SKCM Tier 3 data downloaded Jan. 6th, 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Project root: four directory levels above this script (mirna/cox/SKCM/).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Parse the follow-up file. NOTE: file handles in this script are never
## explicitly closed after reading; cleanup is left to the interpreter.
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_skcm.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two non-data header rows that follow the column-name row.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## Repeated rows for a patient are only detected when consecutive: the
    ## check is against the last appended entry, not the whole list.
    if clinical1[-1][0]==i[patient_column]:
        ## The regex accepts only all-digit day counts, so blank or
        ## '[Not Available]'-style fields fall through.  Death is tested
        ## first, so a death date takes precedence over a last-contact date.
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
## Integer-encode sex for use as a Cox covariate: MALE=0, FEMALE=1.
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','clinical','nationwidechildrens.org_clinical_patient_skcm.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two non-data header rows.
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        ## more_clinical maps patient ID -> [0, sex, age]; the leading 0 is a
        ## placeholder where sibling scripts store tumor grade (matches the
        ## "[..., 0, sex, age at diagnosis]" layout documented further down).
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    ## Rows with missing/unmappable sex or non-integer age raise
    ## KeyError/ValueError and are skipped entirely.
    ## NOTE(review): the bare except would also hide unrelated errors --
    ## consider narrowing to (KeyError, ValueError, IndexError).
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Index the follow-up records by patient ID once, instead of rebuilding
## [j[0] for j in clinical] and calling list.index on every iteration
## (accidentally O(n^2)).  setdefault keeps the FIRST record per ID, which
## matches list.index's first-match semantics exactly.
clinical_by_id={}
for record in clinical:
    clinical_by_id.setdefault(record[0],record)
for i in clinical4:
    followup=clinical_by_id.get(i[0])
    if followup is None:
        ## Patient appears only in the clinical_patient file.
        new_clinical.append(i)
    elif i[1]<=followup[1]:
        ## The follow-up record has the later (or equal) time; prefer it.
        new_clinical.append(followup)
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## The original membership test re-scanned the growing new_clinical list each
## time; a set of already-merged IDs is equivalent and O(1) per lookup.
merged_ids=set(entry[0] for entry in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, 06 a metastatic; both were allowed for SKCM
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        ## Sample-type code: 4th barcode field minus its trailing vial
        ## letter, e.g. '01A' -> '01'.
        sample_type=i[1].split('-')[3][:-1]
        if sample_type=='01' or sample_type=='06':
            ## Patient ID is the first three barcode fields rejoined,
            ## e.g. 'TCGA-XX-YYYY' (equivalent to the original zip/join trick).
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mirna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## BUGFIX: dict.has_key() is deprecated and removed in Python 3; the
    ## 'in' operator is the portable, identical-behavior replacement.
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','mirna',j.split('.txt')[0]+'new.txt'))
        ## Map miRNA name -> normalized count for this file.
        ## NOTE(review): the inner list comprehension reuses the name 'i';
        ## under Python 2 scoping this rebinds the outer loop variable after
        ## it runs.  Harmless here (the loop reassigns 'i' next iteration,
        ## and 'i[-1]' was already evaluated), but fragile -- confirm before
        ## copying this pattern.
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        ## miRNAs absent from this patient's file default to a count of 0.
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## Element-wise average across files.  Under Python 2 zip returns a
        ## list of (name, value) tuples; downstream code only indexes
        ## positionally ([0]/[1]), so tuples vs lists does not matter.
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## final_mirnas gets one (initially empty) list per patient.
## NOTE: [[]]*n creates n references to ONE shared list, but the filter loop
## below REBINDS each slot with '+' rather than mutating in place, so the
## aliasing is deliberate-or-lucky but not a bug here.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    ## Collect this miRNA's [name, value] pair from every patient.
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## Keep the miRNA only if fewer than a quarter of patients have zero
    ## counts AND the median expression exceeds 0.5.
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','SKCM','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
## Map vital status onto the event indicator expected by Surv():
## 0 = censored (Alive), 1 = event (Dead).
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        ## Row layout: [time, vital status, grade placeholder, sex, age, expression]
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + sex + age)') ## Perform Cox regression
    ## Parse the printed coxph summary for the mirna coefficient and pvalue.
    for entry in str(res).split('\n'):
        ## Split once per line (the original split the same line three times
        ## and used a bare 'except: pass' to skip blank lines, which also
        ## swallowed every other error).  Blank lines split to [].
        fields=entry.split()
        if fields and fields[0]=='mirna':
            coeff=fields[1]
            pvalue=fields[-1]
            break
    ## NOTE(review): if no 'mirna' row is ever found, coeff/pvalue silently
    ## retain the PREVIOUS miRNA's values (or raise NameError on the very
    ## first iteration) -- the parse is assumed to always succeed.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Emit the results as a tab-delimited table: mirna name, Cox coefficient, p-value.
out=open(os.path.join(BASE_DIR,'mirna','cox','SKCM','coeffs_pvalues.txt'),'w')
for name,coeff,pvalue in zip(mirnas,coeffs,pvalues):
    ## One row per miRNA; byte-identical output to writing the fields separately.
    out.write(name+'\t'+coeff+'\t'+pvalue+'\n')
out.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/SKCM/cox_regression.py",
"copies": "1",
"size": "9664",
"license": "mit",
"hash": -7430986892123306000,
"line_mean": 37.3492063492,
"line_max": 142,
"alpha_frac": 0.6553187086,
"autogenerated": false,
"ratio": 3.0209440450140668,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4176262753614067,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in STAD Tier 3 data downloaded Jan. 6th, 2016
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Project root: four directory levels above this script (mirna/cox/STAD/).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Read the follow up data
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_stad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two non-data header rows that follow the column-name row.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical1=[['','','']]
for i in data:
    ## Repeated rows are only detected when consecutive (check against the
    ## last appended entry).  The regex accepts only all-digit day counts;
    ## death is tested first and so takes precedence over last contact.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
## Integer-encode tumor grade (G1/G2/G3 -> 1/2/3) and sex (MALE=0, FEMALE=1)
## for use as Cox covariates.
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','clinical','nationwidechildrens.org_clinical_patient_stad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two non-data header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    try:
        ## more_clinical maps patient ID -> [grade, sex, age].
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    ## Rows with unmappable grade/sex or non-integer age raise
    ## KeyError/ValueError and are skipped (bare except also hides other
    ## errors -- NOTE(review): consider narrowing).
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Index the follow-up records by patient ID once, instead of rebuilding
## [j[0] for j in clinical] and calling list.index on every iteration
## (accidentally O(n^2)).  setdefault keeps the FIRST record per ID, which
## matches list.index's first-match semantics exactly.
clinical_by_id={}
for record in clinical:
    clinical_by_id.setdefault(record[0],record)
for i in clinical4:
    followup=clinical_by_id.get(i[0])
    if followup is None:
        ## Patient appears only in the clinical_patient file.
        new_clinical.append(i)
    elif i[1]<=followup[1]:
        ## The follow-up record has the later (or equal) time; prefer it.
        new_clinical.append(followup)
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
merged_ids=set(entry[0] for entry in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        ## Sample-type code: 4th barcode field minus its trailing vial
        ## letter, e.g. '01A' -> '01'.
        if i[1].split('-')[3][:-1]=='01':
            ## Patient ID is the first three barcode fields rejoined,
            ## e.g. 'TCGA-XX-YYYY' (equivalent to the original zip/join trick).
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mirna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## BUGFIX: dict.has_key() is deprecated and removed in Python 3; the
    ## 'in' operator is the portable, identical-behavior replacement.
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','STAD','mirna',j.split('.txt')[0]+'new.txt'))
        ## Map miRNA name -> normalized count.  NOTE(review): the inner
        ## comprehension reuses the name 'i', which leaks under Python 2
        ## scoping; harmless here but fragile.
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        ## miRNAs absent from this patient's file default to a count of 0.
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## Element-wise average across files (Python 2 zip returns a list of
        ## (name, value) tuples; downstream only indexes positionally).
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## NOTE: [[]]*n creates n references to ONE list, but the loop below rebinds
## each slot with '+' rather than mutating in place, so this is not a bug.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    ## Collect this miRNA's [name, value] pair from every patient.
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## Keep only miRNAs with <25% zero counts and median expression > 0.5.
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','STAD','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
## Map vital status onto the Surv() event indicator: 0=censored, 1=event.
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        ## Row layout: [time, vital status, grade, sex, age, expression]
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## One-hot indicators for tumor grade (kaplan[..][2] holds grade 1-3);
    ## comprehensions replace the three copy-pasted loops of the original.
    grade1=[1 if ii[2]==1 else 0 for ii in kaplan]
    grade2=[1 if ii[2]==2 else 0 for ii in kaplan]
    grade3=[1 if ii[2]==3 else 0 for ii in kaplan]
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + sex + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
    ## Parse the printed coxph summary for the mirna coefficient and pvalue.
    for entry in str(res).split('\n'):
        ## Split once per line; blank lines split to [] and are skipped by
        ## the guard (the original used a bare 'except: pass' here, which
        ## also swallowed every other error).
        fields=entry.split()
        if fields and fields[0]=='mirna':
            coeff=fields[1]
            pvalue=fields[-1]
            break
    ## NOTE(review): if no 'mirna' row is found, coeff/pvalue silently retain
    ## the previous miRNA's values -- the parse is assumed to always succeed.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Emit the results as a tab-delimited table: mirna name, Cox coefficient, p-value.
out=open(os.path.join(BASE_DIR,'mirna','cox','STAD','coeffs_pvalues.txt'),'w')
for name,coeff,pvalue in zip(mirnas,coeffs,pvalues):
    ## One row per miRNA; byte-identical output to writing the fields separately.
    out.write(name+'\t'+coeff+'\t'+pvalue+'\n')
out.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/STAD/cox_regression.py",
"copies": "1",
"size": "10460",
"license": "mit",
"hash": 1931067833772044000,
"line_mean": 34.3378378378,
"line_max": 142,
"alpha_frac": 0.6471319312,
"autogenerated": false,
"ratio": 3.0336426914153134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41807746226153136,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every miRNA in UCEC Tier 3 data downloaded Jan. 6th 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Project root: four directory levels above this script (mirna/cox/UCEC/).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were three clinical files with nonredundant data. V4.0 is in general the most uptodate, but it is possible
## for data in the other files to be more uptodate. As a result, clinical data will be merged.
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_ucec.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two non-data header rows that follow the column-name row.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## Repeated rows are only detected when consecutive (check against the
    ## last appended entry).  NOTE(review): unlike the SKCM/STAD scripts,
    ## last-contact is tested BEFORE death here, so a last-contact date takes
    ## precedence over a death date -- confirm this ordering is intentional.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical1=clinical1[1:]
## Same parse as above, for the v2.0 follow-up file -> clinical2.
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_ucec.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two non-data header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
    ## Consecutive-duplicate handling and alive-before-death precedence as in
    ## the v4.0 parse above.
    if clinical2[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical2=clinical2[1:]
##merging the data
## Merge v2.0 into v4.0, preferring whichever record has the longer follow-up
## time (ties go to v4.0).  Index v4.0 by patient ID once (setdefault keeps
## the FIRST record, matching list.index) instead of rebuilding
## [j[0] for j in clinical1] and calling .index per iteration (O(n^2)).
v4_by_id={}
for rec in clinical1:
    v4_by_id.setdefault(rec[0],rec)
new_clinical=[]
for i in clinical2:
    match=v4_by_id.get(i[0])
    if match is None:
        new_clinical.append(i)
    elif i[1]<=match[1]:
        new_clinical.append(match)
    else:
        new_clinical.append(i)
## v4.0 patients absent from v2.0 are carried over unchanged.
seen=set(rec[0] for rec in new_clinical)
for i in clinical1:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## Same parse again, for the v1.7 follow-up file -> clinical3.
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','clinical','nationwidechildrens.org_clinical_follow_up_v1.7_ucec.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two non-data header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical3=[['','','']]
for i in data:
    ## Consecutive-duplicate handling and alive-before-death precedence as in
    ## the v4.0 parse above.
    if clinical3[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical3=clinical3[1:]
##merging the data
## Merge v1.7 into the v2.0/v4.0 merge, again preferring the record with the
## longer follow-up time (ties go to the earlier merge).  Dict index replaces
## the per-iteration list rebuild + .index (O(n^2)); setdefault keeps the
## first record per ID, matching list.index semantics.
merged_by_id={}
for rec in new_clinical:
    merged_by_id.setdefault(rec[0],rec)
newer_clinical=[]
for i in clinical3:
    match=merged_by_id.get(i[0])
    if match is None:
        newer_clinical.append(i)
    elif i[1]<=match[1]:
        newer_clinical.append(match)
    else:
        newer_clinical.append(i)
## Previously merged patients absent from v1.7 are carried over unchanged.
seen=set(rec[0] for rec in newer_clinical)
for i in new_clinical:
    if i[0] not in seen:
        newer_clinical.append(i)
        seen.add(i[0])
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
## Integer-encode grade; UCEC additionally maps the free-text 'High Grade'
## onto 3.  Sex: MALE=0, FEMALE=1.
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['High Grade']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','clinical','nationwidechildrens.org_clinical_patient_ucec.txt'))
columns=f.readline().split('\t')
grade_column=columns.index('neoplasm_histologic_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two non-data header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    try:
        ## more_clinical maps patient ID -> [grade, sex, age].
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    ## Rows with unmappable grade/sex or non-integer age raise
    ## KeyError/ValueError and are skipped (bare except also hides other
    ## errors -- NOTE(review): consider narrowing).
    except:
        pass
newest_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Dict index replaces the per-iteration list rebuild + .index (O(n^2));
## setdefault keeps the first record per ID, matching list.index semantics.
followup_by_id={}
for rec in newer_clinical:
    followup_by_id.setdefault(rec[0],rec)
for i in clinical4:
    match=followup_by_id.get(i[0])
    if match is None:
        ## Patient appears only in the clinical_patient file.
        newest_clinical.append(i)
    elif i[1]<=match[1]:
        ## The follow-up record has the later (or equal) time; prefer it.
        newest_clinical.append(match)
    else:
        newest_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
seen=set(rec[0] for rec in newest_clinical)
for i in newer_clinical:
    if i[0] not in seen:
        newest_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in newest_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        ## Sample-type code: 4th barcode field minus its trailing vial
        ## letter, e.g. '01A' -> '01'.
        if i[1].split('-')[3][:-1]=='01':
            ## Patient ID is the first three barcode fields rejoined,
            ## e.g. 'TCGA-XX-YYYY' (equivalent to the original zip/join trick).
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mirna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## BUGFIX: dict.has_key() is deprecated and removed in Python 3; the
    ## 'in' operator is the portable, identical-behavior replacement.
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','mirna',j.split('.txt')[0]+'new.txt'))
        ## Map miRNA name -> normalized count.  NOTE(review): the inner
        ## comprehension reuses the name 'i', which leaks under Python 2
        ## scoping; harmless here but fragile.
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        ## miRNAs absent from this patient's file default to a count of 0.
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## Element-wise average across files (Python 2 zip returns a list of
        ## (name, value) tuples; downstream only indexes positionally).
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## NOTE: [[]]*n creates n references to ONE list, but the loop below rebinds
## each slot with '+' rather than mutating in place, so this is not a bug.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    ## Collect this miRNA's [name, value] pair from every patient.
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## Keep only miRNAs with <25% zero counts and median expression > 0.5.
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','UCEC','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
## Map vital status onto the Surv() event indicator: 0=censored, 1=event.
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        ## Row layout: [time, vital status, grade, sex, age, expression]
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## One-hot indicators for tumor grade (kaplan[..][2] holds grade 1-3);
    ## comprehensions replace the three copy-pasted loops of the original.
    grade1=[1 if ii[2]==1 else 0 for ii in kaplan]
    grade2=[1 if ii[2]==2 else 0 for ii in kaplan]
    grade3=[1 if ii[2]==3 else 0 for ii in kaplan]
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    ## NOTE: unlike the sibling scripts, the UCEC model deliberately includes
    ## no sex term (the formula below has none) -- preserved as-is.
    res=ro.r('coxph(Surv(times,died) ~ mirna + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
    ## Parse the printed coxph summary for the mirna coefficient and pvalue.
    for entry in str(res).split('\n'):
        ## Split once per line; blank lines split to [] and are skipped by
        ## the guard (the original used a bare 'except: pass' here, which
        ## also swallowed every other error).
        fields=entry.split()
        if fields and fields[0]=='mirna':
            coeff=fields[1]
            pvalue=fields[-1]
            break
    ## NOTE(review): if no 'mirna' row is found, coeff/pvalue silently retain
    ## the previous miRNA's values -- the parse is assumed to always succeed.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Emit the results as a tab-delimited table: mirna name, Cox coefficient, p-value.
out=open(os.path.join(BASE_DIR,'mirna','cox','UCEC','coeffs_pvalues.txt'),'w')
for name,coeff,pvalue in zip(mirnas,coeffs,pvalues):
    ## One row per miRNA; byte-identical output to writing the fields separately.
    out.write(name+'\t'+coeff+'\t'+pvalue+'\n')
out.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/cox/UCEC/cox_regression.py",
"copies": "1",
"size": "13817",
"license": "mit",
"hash": -2111816502972982300,
"line_mean": 33.8035264484,
"line_max": 142,
"alpha_frac": 0.639067815,
"autogenerated": false,
"ratio": 2.978443630092692,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8998832171410789,
"avg_score": 0.023735854736380475,
"num_lines": 397
} |
## A script for finding every cox coefficient and pvalue for every mRNA in BLCA Tier 3 data downloaded Feb. 2015
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
## Read the follow up data
## It was found that the v4.0 file contained more recent follow up data than v2.0, but the files contained nonredundant patients.
## So both files are loaded with the v4.0 getting preference.
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_blca.txt'))
## Skip the three header lines of the TCGA clinical file.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical1=[['','','']]
for i in data:
    try:
        ## Positional columns: i[0] patient barcode, i[8] vital status,
        ## i[9] days to last contact, i[10] days to death -- presumably fixed
        ## for this file snapshot; verify if the file format changes.
        if clinical1[-1][0]==i[0]:
            if i[8]=='Alive':
                clinical1[-1]=[i[0],int(i[9]),'Alive']
            elif i[8]=='Dead':
                clinical1[-1]=[i[0],int(i[10]),'Dead']
            else:
                pass
        else:
            if i[8]=='Alive':
                clinical1.append([i[0],int(i[9]),'Alive'])
            elif i[8]=='Dead':
                clinical1.append([i[0],int(i[10]),'Dead'])
            else:
                pass
    except:
        ## Best-effort: rows with missing/non-numeric day counts are skipped.
        pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_blca.txt'))
## Skip the three header lines.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
    ## v4.0 takes preference: only load patients not already in clinical.
    if i[0] not in [j[0] for j in clinical]:
        try:
            ## In the v2.0 file the vital status / day columns sit at
            ## i[6]/i[7]/i[8] rather than i[8]/i[9]/i[10].
            if clinical2[-1][0]==i[0]:
                if i[6]=='Alive':
                    clinical2[-1]=[i[0],int(i[7]),'Alive']
                elif i[6]=='Dead':
                    clinical2[-1]=[i[0],int(i[8]),'Dead']
                else:
                    pass
            else:
                if i[6]=='Alive':
                    clinical2.append([i[0],int(i[7]),'Alive'])
                elif i[6]=='Dead':
                    clinical2.append([i[0],int(i[8]),'Dead'])
                else:
                    pass
        except:
            ## Best-effort: skip rows with missing/non-numeric day counts.
            pass
## Removing the empty value and combining the lists.
clinical+=clinical2[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
grade_dict={}
grade_dict['High Grade']=1
grade_dict['Low Grade']=0
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_patient_blca.txt'))
## Skip the three header lines.
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        ## more_clinical maps patient barcode -> [grade, sex, age].
        ## i[-5] grade, i[6] sex, i[42] age, i[21] vital status,
        ## i[22]/i[23] days to last contact/death -- positional indexes,
        ## presumably fixed for this file snapshot; verify on format changes.
        more_clinical[i[0]]=[grade_dict[i[-5]],sex_dict[i[6]],int(i[42])]
        if i[21]=='Alive':
            clinical4.append([i[0],int(i[22]),'Alive'])
        elif i[21]=='Dead':
            clinical4.append([i[0],int(i[23]),'Dead'])
        else:
            pass
    except:
        ## Patients with incomplete grade/sex/age or day counts are dropped.
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Map each patient ID to its first record in clinical once, instead of
## rebuilding [j[0] for j in clinical] and calling .index() on every
## iteration (that was O(n^2)); first occurrence wins, matching the
## semantics of list.index.
clinical_by_id={}
for j in clinical:
    if j[0] not in clinical_by_id:
        clinical_by_id[j[0]]=j
for i in clinical4:
    if i[0] not in clinical_by_id:
        new_clinical.append(i)
    elif i[1]<=clinical_by_id[i[0]][1]:
        ## The follow up record is at least as recent; keep it.
        new_clinical.append(clinical_by_id[i[0]])
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## Track merged IDs in a set instead of rescanning new_clinical per iteration.
merged_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ##normalized files were used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the patient barcode (first three '-' separated fields).
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() was removed in Python 3; the `in` operator is the
    ## portable membership test with identical behavior.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','mrna',j))
        f.readline()
        ## Keep [token after '|', normalized value] per row.
        ## NOTE(review): the comprehension reuses the name `i` -- harmless
        ## here (Python 2 scoping; `i` is reassigned at the top of each outer
        ## iteration before use) but fragile if the loop body grows.
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## Use a fresh list per patient and append in place: the original
## [[]]*len(genes) plus `final_genes[index]=final_genes[index]+[kk]` rebuilt
## every per-patient list on each kept gene, which is quadratic.
final_genes=[[] for _ in genes]
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## NOTE(review): len(genes)/4 is integer division under Python 2; a later
    ## revision of this pipeline uses /4.0 -- confirm which cutoff is intended.
    if count<len(genes)/4 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index].append(kk)
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','BLCA','final_genes.txt'),'w')
##for i in final_genes:
##    f.write(str(i))
##    f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Indicator (dummy) variables for tumor grade (0 = low, 1 = high).
    lowgrade=[1 if ii[2]==0 else 0 for ii in kaplan]
    highgrade=[1 if ii[2]==1 else 0 for ii in kaplan]
    ro.globalenv['lowgrade']=ro.IntVector(lowgrade)
    ro.globalenv['highgrade']=ro.IntVector(highgrade)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + lowgrade + highgrade + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## Reset per gene: previously a failed parse silently reused the previous
    ## gene's coefficient and pvalue (or raised NameError on the first gene).
    coeff='NA'
    pvalue='NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError:
            ## Blank lines in the R printout have no fields; skip them.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## Context manager guarantees the handle is closed; join builds each record in one write.
with open(os.path.join(BASE_DIR,'cox_regression','BLCA','coeffs_pvalues.txt'),'w') as f:
    for i,j,k in zip(genes,coeffs,pvalues):
        f.write('\t'.join([i,j,k])+'\n')
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/BLCA/cox_regression.py",
"copies": "1",
"size": "10115",
"license": "mit",
"hash": 3971779708980136000,
"line_mean": 32.4933774834,
"line_max": 142,
"alpha_frac": 0.6125556105,
"autogenerated": false,
"ratio": 3.1422802112457284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4254835821745728,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in BLCA Tier 3 data downloaded Jan. 5th 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Read the follow up data
## It was found that the v4.0 file contained more recent follow up data than v2.0, but the files contained nonredundant patients.
## So both files are loaded with the v4.0 getting preference.
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_blca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header lines.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical1=[['','','']]
for i in data:
    ## The regex accepts only all-digit day counts; placeholders such as
    ## '[Not Available]' fail the match and the row is skipped.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_blca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header lines.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
    ## v4.0 takes preference: only load patients not already in clinical.
    if i[patient_column] not in [j[0] for j in clinical]:
        if clinical2[-1][0]==i[patient_column]:
            if re.search('^[0-9]+$',i[alive_column]):
                clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
            elif re.search('^[0-9]+$',i[death_column]):
                clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
            else:
                pass
        else:
            if re.search('^[0-9]+$',i[alive_column]):
                clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
            elif re.search('^[0-9]+$',i[death_column]):
                clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
            else:
                pass
## Removing the empty value and combining the lists.
clinical+=clinical2[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
grade_dict={}
grade_dict['High Grade']=1
grade_dict['Low Grade']=0
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_patient_blca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('neoplasm_histologic_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header lines.
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        ## more_clinical maps patient barcode -> [grade, sex, age].
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        ## Patients with unrecognized grade or missing sex/age are dropped.
        pass
new_clinical=[]
####It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
####All the clinical data is merged checking which data is the most up to date
## Map each patient ID to its first record in clinical once; the original
## rebuilt [j[0] for j in clinical] and called .index() on every iteration
## (O(n^2)). First occurrence wins, matching the semantics of list.index.
clinical_by_id={}
for j in clinical:
    if j[0] not in clinical_by_id:
        clinical_by_id[j[0]]=j
for i in clinical4:
    if i[0] not in clinical_by_id:
        new_clinical.append(i)
    elif i[1]<=clinical_by_id[i[0]][1]:
        ## The follow up record is at least as recent; keep it.
        new_clinical.append(clinical_by_id[i[0]])
    else:
        new_clinical.append(i)
####also do the reverse since clinical can contain patients not included in clinical4
## Track merged IDs in a set instead of rescanning new_clinical per iteration.
merged_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
#### only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
#### A new list containing both follow up times and grade, sex, and age is constructed.
#### Only patients with grade, sex, and age information are included.
#### Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
##
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ##normalized files were used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the patient barcode (first three '-' separated fields).
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() was removed in Python 3; the `in` operator is the
    ## portable membership test with identical behavior.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','mrna',j))
        f.readline()
        ## Keep [token after '|', normalized value] per row.
        ## NOTE(review): the comprehension reuses the name `i` -- harmless
        ## here (Python 2 scoping; `i` is reassigned at the top of each outer
        ## iteration before use) but fragile if the loop body grows.
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## Use a fresh list per patient and append in place: the original
## [[]]*len(genes) plus list concatenation rebuilt every per-patient list on
## each kept gene, which is quadratic.
final_genes=[[] for _ in genes]
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index].append(kk)
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
## Context manager guarantees the handle is closed even if a write fails.
with open(os.path.join(BASE_DIR,'mrna','cox','BLCA','final_genes.txt'),'w') as f:
    for i in final_genes:
        f.write(str(i))
        f.write('\n')
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Indicator (dummy) variables for tumor grade (0 = low, 1 = high).
    lowgrade=[1 if ii[2]==0 else 0 for ii in kaplan]
    highgrade=[1 if ii[2]==1 else 0 for ii in kaplan]
    ro.globalenv['lowgrade']=ro.IntVector(lowgrade)
    ro.globalenv['highgrade']=ro.IntVector(highgrade)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + lowgrade + highgrade + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## Reset per gene: previously a failed parse silently reused the previous
    ## gene's coefficient and pvalue (or raised NameError on the first gene).
    coeff='NA'
    pvalue='NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError:
            ## Blank lines in the R printout have no fields; skip them.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## Context manager guarantees the handle is closed; join builds each record in one write.
with open(os.path.join(BASE_DIR,'mrna','cox','BLCA','coeffs_pvalues.txt'),'w') as f:
    for i,j,k in zip(genes,coeffs,pvalues):
        f.write('\t'.join([i,j,k])+'\n')
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/BLCA/cox_regression.py",
"copies": "1",
"size": "11254",
"license": "mit",
"hash": 6934990945183843000,
"line_mean": 34.726984127,
"line_max": 142,
"alpha_frac": 0.6395948107,
"autogenerated": false,
"ratio": 3.1226415094339623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9141241577317418,
"avg_score": 0.024198948563308748,
"num_lines": 315
} |
## A script for finding every cox coefficient and pvalue for every mRNA in BRCA Tier 3 data downloaded Feb. 2015
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
## There were three clinical files with nonredundant data. V4.0 was found to be most up to date.
## V2.1 was more up to date than V1.5. All three files are loaded with the more up to date file getting preference.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_brca.txt'))
## Skip the three header lines of the TCGA clinical file.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical3=[['','','']]
for i in data:
    ## Positional columns: i[0] barcode, i[8] vital status, i[9] days to last
    ## contact, i[10] days to death -- presumably fixed for this snapshot.
    if clinical3[-1][0]==i[0]:
        if i[8]=='Alive':
            clinical3[-1]=[i[0],int(i[9]),'Alive']
        elif i[8]=='Dead':
            try:
                clinical3[-1]=[i[0],int(i[10]),'Dead']
            except:
                ## Best-effort: skip deaths with a non-numeric day count.
                pass
        else:
            pass
    else:
        if i[8]=='Alive':
            clinical3.append([i[0],int(i[9]),'Alive'])
        elif i[8]=='Dead':
            try:
                clinical3.append([i[0],int(i[10]),'Dead'])
            except:
                pass
        else:
            pass
## Removing the empty value.
clinical=clinical3[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.1_brca.txt'))
## Skip the three header lines.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical1=[['','','']]
for i in data:
    ## v4.0 takes preference: only load patients not already in clinical.
    if i[0] not in [j[0] for j in clinical]:
        ## In this file version the status/day columns sit at i[6]/i[7]/i[8].
        if clinical1[-1][0]==i[0]:
            if i[6]=='Alive':
                clinical1[-1]=[i[0],int(i[7]),'Alive']
            elif i[6]=='Dead':
                try:
                    clinical1[-1]=[i[0],int(i[8]),'Dead']
                except:
                    ## Best-effort: skip deaths with a non-numeric day count.
                    pass
            else:
                pass
        else:
            if i[6]=='Alive':
                clinical1.append([i[0],int(i[7]),'Alive'])
            elif i[6]=='Dead':
                try:
                    clinical1.append([i[0],int(i[8]),'Dead'])
                except:
                    pass
            else:
                pass
##merging data and removing the empty value
clinical+=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v1.5_brca.txt'))
## Skip the three header lines.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
    ## v4.0/v2.1 take preference: only load patients not already in clinical.
    if i[0] not in [j[0] for j in clinical]:
        if clinical2[-1][0]==i[0]:
            ## Outer try also guards short rows that lack column i[6].
            try:
                if i[6]=='Alive':
                    clinical2[-1]=[i[0],int(i[7]),'Alive']
                elif i[6]=='Dead':
                    try:
                        clinical2[-1]=[i[0],int(i[8]),'Dead']
                    except:
                        pass
                else:
                    pass
            except:
                pass
        else:
            try:
                if i[6]=='Alive':
                    clinical2.append([i[0],int(i[7]),'Alive'])
                elif i[6]=='Dead':
                    try:
                        clinical2.append([i[0],int(i[8]),'Dead'])
                    except:
                        pass
                else:
                    pass
            except:
                pass
##merging data and removing the empty value
clinical+=clinical2[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['Infiltrating Ductal Carcinoma']=1
grade_dict['Metaplastic Carcinoma']=3
grade_dict['Mucinous Carcinoma']=4
grade_dict['Medullary Carcinoma']=5
grade_dict['Infiltrating Lobular Carcinoma']=6
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_patient_brca.txt'))
## Skip the three header lines.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    try:
        ## more_clinical maps patient barcode -> [histology code, sex, age].
        ## i[-11] histology, i[6] sex, i[20] age -- positional indexes,
        ## presumably fixed for this file snapshot.
        more_clinical[i[0]]=[grade_dict[i[-11]],sex_dict[i[6]],int(i[20])]
    except:
        ## Patients with unrecognized histology or missing sex/age are
        ## dropped here but may still contribute follow up data below.
        pass
    if i[13]=='Alive':
        clinical4.append([i[0],int(i[14]),'Alive'])
    elif i[13]=='Dead':
        try:
            clinical4.append([i[0],int(i[15]),'Dead'])
        except:
            pass
    else:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Map each patient ID to its first record in clinical once, instead of
## rebuilding [j[0] for j in clinical] and calling .index() on every
## iteration (that was O(n^2)); first occurrence wins, matching the
## semantics of list.index.
clinical_by_id={}
for j in clinical:
    if j[0] not in clinical_by_id:
        clinical_by_id[j[0]]=j
for i in clinical4:
    if i[0] not in clinical_by_id:
        new_clinical.append(i)
    elif i[1]<=clinical_by_id[i[0]][1]:
        ## The follow up record is at least as recent; keep it.
        new_clinical.append(clinical_by_id[i[0]])
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## Track merged IDs in a set instead of rescanning new_clinical per iteration.
merged_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the patient barcode (first three '-' separated fields).
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() was removed in Python 3; the `in` operator is the
    ## portable membership test with identical behavior.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','mrna',j))
        f.readline()
        ## Keep [token after '|', normalized value] per row.
        ## NOTE(review): the comprehension reuses the name `i` -- harmless
        ## here (Python 2 scoping; `i` is reassigned at the top of each outer
        ## iteration before use) but fragile if the loop body grows.
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## Use a fresh list per patient and append in place: the original
## [[]]*len(genes) plus list concatenation rebuilt every per-patient list on
## each kept gene, which is quadratic.
final_genes=[[] for _ in genes]
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## NOTE(review): len(genes)/4 is integer division under Python 2; a later
    ## revision of this pipeline uses /4.0 -- confirm which cutoff is intended.
    if count<len(genes)/4 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index].append(kk)
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','BRCA','final_genes.txt'),'w')
##for i in final_genes:
##    f.write(str(i))
##    f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Indicator (dummy) variables for each histological type; the codes come
    ## from grade_dict (1 ductal, 3 metaplastic, 4 mucinous, 5 medullary,
    ## 6 lobular). Comprehensions replace the five hand-written loops.
    ro.globalenv['ductal']=ro.IntVector([1 if ii[2]==1 else 0 for ii in kaplan])
    ro.globalenv['metaplastic']=ro.IntVector([1 if ii[2]==3 else 0 for ii in kaplan])
    ro.globalenv['mucinous']=ro.IntVector([1 if ii[2]==4 else 0 for ii in kaplan])
    ro.globalenv['medullary']=ro.IntVector([1 if ii[2]==5 else 0 for ii in kaplan])
    ro.globalenv['lobular']=ro.IntVector([1 if ii[2]==6 else 0 for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + ductal + metaplastic + mucinous + medullary + lobular + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## Reset per gene: previously a failed parse silently reused the previous
    ## gene's coefficient and pvalue (or raised NameError on the first gene).
    coeff='NA'
    pvalue='NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError:
            ## Blank lines in the R printout have no fields; skip them.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## Context manager guarantees the handle is closed; join builds each record in one write.
with open(os.path.join(BASE_DIR,'cox_regression','BRCA','coeffs_pvalues.txt'),'w') as f:
    for i,j,k in zip(genes,coeffs,pvalues):
        f.write('\t'.join([i,j,k])+'\n')
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/BRCA/cox_regression.py",
"copies": "1",
"size": "12294",
"license": "mit",
"hash": -6061850855121882000,
"line_mean": 30.442455243,
"line_max": 142,
"alpha_frac": 0.5896372214,
"autogenerated": false,
"ratio": 3.1595990747879723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4249236296187972,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in BRCA Tier 3 data downloaded Jan. 5th 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were three clinical files with nonredundant data. V4.0 is in general the most uptodate, but it is possible
## for data in the other files to be more uptodate. As a result, clinical data will be merged.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header lines.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## The regex accepts only all-digit day counts; placeholders such as
    ## '[Not Available]' fail the match and the row is skipped.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical1=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.1_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
if clinical2[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[alive_column]):
clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
elif re.search('^[0-9]+$',i[death_column]):
clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
else:
pass
else:
if re.search('^[0-9]+$',i[alive_column]):
clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
##removing the empty value
clinical2=clinical2[1:]
##merging the data
## clinical1 is indexed by patient ID once (first occurrence, matching the
## original list.index semantics) instead of rebuilding the ID list for every
## row -- the original membership tests made this merge O(n^2).
clinical1_index={}
for j in clinical1:
    if j[0] not in clinical1_index:
        clinical1_index[j[0]]=j
new_clinical=[]
for i in clinical2:
    ## Prefer whichever record has the later follow up time.
    previous=clinical1_index.get(i[0])
    if previous is None:
        new_clinical.append(i)
    elif i[1]<=previous[1]:
        new_clinical.append(previous)
    else:
        new_clinical.append(i)
## Patients only present in clinical1 are carried over as-is.
merged_ids=set(j[0] for j in new_clinical)
for i in clinical1:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v1.5_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## Same dedupe-by-last-listing parse as the v4.0/v2.1 files, for the v1.5 follow up file.
clinical3=[['','','']]
for i in data:
    if clinical3[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical3=clinical3[1:]
##merging the data
## new_clinical is indexed by patient ID once (first occurrence, matching the
## original list.index semantics); the original rebuilt the ID list per row, O(n^2).
previous_index={}
for j in new_clinical:
    if j[0] not in previous_index:
        previous_index[j[0]]=j
newer_clinical=[]
for i in clinical3:
    ## Prefer whichever record has the later follow up time.
    known=previous_index.get(i[0])
    if known is None:
        newer_clinical.append(i)
    elif i[1]<=known[1]:
        newer_clinical.append(known)
    else:
        newer_clinical.append(i)
## Patients only present in new_clinical are carried over as-is.
newer_ids=set(j[0] for j in newer_clinical)
for i in new_clinical:
    if i[0] not in newer_ids:
        newer_clinical.append(i)
        newer_ids.add(i[0])
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
## NOTE(review): no histology maps to 2; the codes used below are 1 and 3-6, as written.
grade_dict['Infiltrating Ductal Carcinoma']=1
grade_dict['Metaplastic Carcinoma']=3
grade_dict['Mucinous Carcinoma']=4
grade_dict['Medullary Carcinoma']=5
grade_dict['Infiltrating Lobular Carcinoma']=6
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_patient_brca.txt'))
columns=f.readline().split('\t')
grade_column=columns.index('histological_type')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
## The bare except silently skips any row missing a mapped histology, sex, or a numeric age.
for i in data:
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        pass
newest_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## newer_clinical is indexed by patient ID once (first occurrence, matching the
## original list.index semantics); the original rebuilt the ID list per row, O(n^2).
followup_index={}
for j in newer_clinical:
    if j[0] not in followup_index:
        followup_index[j[0]]=j
for i in clinical4:
    ## Prefer whichever record has the later follow up time.
    known=followup_index.get(i[0])
    if known is None:
        newest_clinical.append(i)
    elif i[1]<=known[1]:
        newest_clinical.append(known)
    else:
        newest_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
newest_ids=set(j[0] for j in newest_clinical)
for i in newer_clinical:
    if i[0] not in newest_ids:
        newest_clinical.append(i)
        newest_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in newest_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the 3-field patient barcode (e.g. TCGA-XX-YYYY) from the sample barcode.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' replaces the Python-2-only dict.has_key; the membership test is unchanged
    ## and this form also runs under Python 3.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','mrna',j))
        f.readline()
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    ## NOTE(review): zip() here must return a list ([name, value] pairs indexed later) -- Python 2 behavior.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## The [[]]*len(genes) aliasing is harmless because each slot is rebound (not mutated) below.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
## A with-statement guarantees the handle is closed even if a write fails.
with open(os.path.join(BASE_DIR,'mrna','cox','BRCA','final_genes.txt'),'w') as f:
    for row in final_genes:
        f.write(str(row))
        f.write('\n')
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## The histology code (kaplan[...][2]) is expanded into one 0/1 indicator list per subtype.
    ##ductal
    ductal=[]
    for ii in kaplan:
        if ii[2]==1:
            ductal.append(1)
        else:
            ductal.append(0)
    ##metaplastic
    metaplastic=[]
    for ii in kaplan:
        if ii[2]==3:
            metaplastic.append(1)
        else:
            metaplastic.append(0)
    ##mucinous
    mucinous=[]
    for ii in kaplan:
        if ii[2]==4:
            mucinous.append(1)
        else:
            mucinous.append(0)
    ##medullary
    medullary=[]
    for ii in kaplan:
        if ii[2]==5:
            medullary.append(1)
        else:
            medullary.append(0)
    ##lobular
    lobular=[]
    for ii in kaplan:
        if ii[2]==6:
            lobular.append(1)
        else:
            lobular.append(0)
    ro.globalenv['ductal']=ro.IntVector(ductal)
    ro.globalenv['metaplastic']=ro.IntVector(metaplastic)
    ro.globalenv['mucinous']=ro.IntVector(mucinous)
    ro.globalenv['medullary']=ro.IntVector(medullary)
    ro.globalenv['lobular']=ro.IntVector(lobular)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + ductal + metaplastic + mucinous + medullary + lobular + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## The bare except guards entry.split()[0] against blank lines (IndexError).
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    ## NOTE(review): if no 'gene' row is found in the coxph output, coeff/pvalue
    ## silently keep the previous gene's values here -- flagging, not changing.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## A with-statement guarantees the handle is closed even if a write fails,
## and one formatted write per row replaces four separate write calls.
with open(os.path.join(BASE_DIR,'mrna','cox','BRCA','coeffs_pvalues.txt'),'w') as f:
    for gene_name,coeff_value,p_value in zip(genes,coeffs,pvalues):
        f.write('%s\t%s\t%s\n' % (gene_name,coeff_value,p_value))
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/BRCA/cox_regression.py",
"copies": "1",
"size": "14067",
"license": "mit",
"hash": 5882410756879780000,
"line_mean": 32.7338129496,
"line_max": 142,
"alpha_frac": 0.6362408474,
"autogenerated": false,
"ratio": 3.0186695278969955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4154910375296995,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in CESC Tier 3 data downloaded Feb. 2015
## NOTE(review): this is a Python 2 script (dict.has_key and list-returning zip are used later in the file).
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
## There were two clinical files with nonredundant data. V4.0 was found to be most up to date.
## Both files are loaded with the more up to date file getting preference
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_cesc.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## Columns are addressed by fixed position (i[10]-i[12]); the bare except skips rows where int() fails.
clinical1=[['','','']]
for i in data:
    try:
        if clinical1[-1][0]==i[0]:
            if i[10]=='Alive':
                clinical1[-1]=[i[0],int(i[11]),'Alive']
            elif i[10]=='Dead':
                clinical1[-1]=[i[0],int(i[12]),'Dead']
            else:
                pass
        else:
            if i[10]=='Alive':
                clinical1.append([i[0],int(i[11]),'Alive'])
            elif i[10]=='Dead':
                clinical1.append([i[0],int(i[12]),'Dead'])
            else:
                pass
    except:
        pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_cesc.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## Patients already taken from the v4.0 file are skipped. The ID set is built
## once up front instead of being rebuilt for every row (the original list
## comprehension inside the loop made this O(n^2)); clinical is not modified
## inside the loop, so hoisting is safe.
v40_ids=set(j[0] for j in clinical)
clinical2=[['','','']]
for i in data:
    if i[0] not in v40_ids:
        try:
            if clinical2[-1][0]==i[0]:
                if i[6]=='Alive':
                    clinical2[-1]=[i[0],int(i[7]),'Alive']
                elif i[6]=='Dead':
                    clinical2[-1]=[i[0],int(i[8]),'Dead']
                else:
                    pass
            else:
                if i[6]=='Alive':
                    clinical2.append([i[0],int(i[7]),'Alive'])
                elif i[6]=='Dead':
                    clinical2.append([i[0],int(i[8]),'Dead'])
                else:
                    pass
        except:
            pass
##merging data and removing the empty value
clinical+=clinical2[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
clinical4=[]
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_patient_cesc.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## Positional columns: i[40]=grade, i[6]=sex, i[42]=age, i[15]-i[17]=vital status/days.
## The bare except silently skips rows missing any of these.
for i in data:
    try:
        more_clinical[i[0]]=[grade_dict[i[40]],sex_dict[i[6]],int(i[42])]
        if i[15]=='Alive':
            clinical4.append([i[0],int(i[16]),'Alive'])
        elif i[15]=='Dead':
            clinical4.append([i[0],int(i[17]),'Dead'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## clinical is indexed by patient ID once (first occurrence, matching the
## original list.index semantics); the original rebuilt the ID list per row, O(n^2).
followup_index={}
for j in clinical:
    if j[0] not in followup_index:
        followup_index[j[0]]=j
for i in clinical4:
    ## Prefer whichever record has the later follow up time.
    known=followup_index.get(i[0])
    if known is None:
        new_clinical.append(i)
    elif i[1]<=known[1]:
        new_clinical.append(known)
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
merged_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the 3-field patient barcode (e.g. TCGA-XX-YYYY) from the sample barcode.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
## NOTE(review): has_key is Python-2-only; 'i[0] in TCGA_to_mrna' is the portable equivalent.
for i in final_clinical:
    if TCGA_to_mrna.has_key(i[0]):
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','CESC','mrna',j))
        f.readline()
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    ## NOTE(review): zip() must return a list here ([name, value] pairs indexed later) -- Python 2 behavior.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## The [[]]*len(genes) aliasing is harmless because each slot is rebound (not mutated) below.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## 4.0 forces true division under Python 2: the original len(genes)/4 floored
    ## the threshold, unlike the companion cox scripts which use /4.0.
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','CESC','final_genes.txt'),'w')
##for i in final_genes:
##    f.write(str(i))
##    f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])): ## These lists contain the clinical information and mRNA data in the same order.
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes):
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## The tumor grade (kaplan[...][2]) is expanded into one 0/1 indicator list per grade.
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ##grade4
    grade4=[]
    for ii in kaplan:
        if ii[2]==4:
            grade4.append(1)
        else:
            grade4.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    ## NOTE(review): sex is set in globalenv but omitted from the formula --
    ## presumably because CESC (cervical) is an essentially all-female cohort; confirm.
    res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + grade4 + age)') ## Perform Cox regression
    # Parse the string of the result with python for the gene coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    ## NOTE(review): if no 'gene' row is parsed, coeff/pvalue silently keep the
    ## previous gene's values here -- flagging, not changing.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## A with-statement guarantees the handle is closed even if a write fails,
## and one formatted write per row replaces four separate write calls.
with open(os.path.join(BASE_DIR,'cox_regression','CESC','coeffs_pvalues.txt'),'w') as f:
    for gene_name,coeff_value,p_value in zip(genes,coeffs,pvalues):
        f.write('%s\t%s\t%s\n' % (gene_name,coeff_value,p_value))
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/CESC/cox_regression.py",
"copies": "1",
"size": "10559",
"license": "mit",
"hash": -1478629582691320600,
"line_mean": 31.3895705521,
"line_max": 142,
"alpha_frac": 0.6079174164,
"autogenerated": false,
"ratio": 3.1285925925925926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4236510008992593,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in CESC Tier 3 data downloaded Jan 5th 2016
## NOTE(review): this is a Python 2 script (dict.has_key and list-returning zip are used later in the file).
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were two clinical files with nonredundant data. V4.0 was found to be most up to date.
## Both files are loaded with the more up to date file getting preference
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_cesc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## Note: death days are checked before last-contact days, so rows carrying both count as Dead.
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_cesc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## Patients already taken from the v4.0 file are skipped. The ID set is built
## once up front instead of being rebuilt for every row (the original list
## comprehension inside the loop made this O(n^2)); clinical is not modified
## inside the loop, so hoisting is safe.
v40_ids=set(j[0] for j in clinical)
clinical2=[['','','']]
for i in data:
    if i[patient_column] not in v40_ids:
        ## Death days are checked before last-contact days, as in the v4.0 parse.
        if clinical2[-1][0]==i[patient_column]:
            if re.search('^[0-9]+$',i[death_column]):
                clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
            elif re.search('^[0-9]+$',i[alive_column]):
                clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
            else:
                pass
        else:
            if re.search('^[0-9]+$',i[death_column]):
                clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
            elif re.search('^[0-9]+$',i[alive_column]):
                clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
            else:
                pass
##merging data and removing the empty value
clinical+=clinical2[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
clinical4=[]
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_patient_cesc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## The bare except silently skips any row missing a mapped grade, sex, or a numeric age.
for i in data:
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## clinical is indexed by patient ID once (first occurrence, matching the
## original list.index semantics); the original rebuilt the ID list per row, O(n^2).
followup_index={}
for j in clinical:
    if j[0] not in followup_index:
        followup_index[j[0]]=j
for i in clinical4:
    ## Prefer whichever record has the later follow up time.
    known=followup_index.get(i[0])
    if known is None:
        new_clinical.append(i)
    elif i[1]<=known[1]:
        new_clinical.append(known)
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
merged_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the 3-field patient barcode (e.g. TCGA-XX-YYYY) from the sample barcode.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' replaces the Python-2-only dict.has_key; the membership test is unchanged
    ## and this form also runs under Python 3.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','CESC','mrna',j))
        f.readline()
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    ## NOTE(review): zip() must return a list here ([name, value] pairs indexed later) -- Python 2 behavior.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
#### Only want genes that meet an expression cutoff
#### A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## The [[]]*len(genes) aliasing is harmless because each slot is rebound (not mutated) below.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
## A with-statement guarantees the handle is closed even if a write fails.
with open(os.path.join(BASE_DIR,'mrna','cox','CESC','final_genes.txt'),'w') as f:
    for row in final_genes:
        f.write(str(row))
        f.write('\n')
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])): ## These lists contain the clinical information and mRNA data in the same order.
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes):
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## The tumor grade (kaplan[...][2]) is expanded into one 0/1 indicator list per grade.
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ##grade4
    grade4=[]
    for ii in kaplan:
        if ii[2]==4:
            grade4.append(1)
        else:
            grade4.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    ## NOTE(review): sex is set in globalenv but omitted from the formula --
    ## presumably because CESC (cervical) is an essentially all-female cohort; confirm.
    res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + grade4 + age)') ## Perform Cox regression
    # Parse the string of the result with python for the gene coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    ## NOTE(review): if no 'gene' row is parsed, coeff/pvalue silently keep the
    ## previous gene's values here -- flagging, not changing.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## A with-statement guarantees the handle is closed even if a write fails,
## and one formatted write per row replaces four separate write calls.
with open(os.path.join(BASE_DIR,'mrna','cox','CESC','coeffs_pvalues.txt'),'w') as f:
    for gene_name,coeff_value,p_value in zip(genes,coeffs,pvalues):
        f.write('%s\t%s\t%s\n' % (gene_name,coeff_value,p_value))
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/CESC/cox_regression.py",
"copies": "1",
"size": "11661",
"license": "mit",
"hash": 5131012387982858000,
"line_mean": 33.6023738872,
"line_max": 142,
"alpha_frac": 0.6346797016,
"autogenerated": false,
"ratio": 3.1104294478527605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42451091494527604,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in COAD Tier 3 data downloaded Feb. 2015
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Project root: three directory levels up from this script.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
## Load the TCGA COAD follow-up file; the first three lines are headers.
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_coad.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical=[['','','']]
for i in data:
    ## NOTE(review): the bare except silently skips rows whose day counts are
    ## not parseable integers (e.g. '[Not Available]') -- apparently deliberate.
    try:
        ## Columns 9/10/11 are assumed to be vital status / days to last
        ## contact / days to death -- TODO confirm against the file header.
        if clinical[-1][0]==i[0]:
            ## Same patient as the previous row: overwrite with the newer record.
            if i[9]=='Alive':
                clinical[-1]=[i[0],int(i[10]),'Alive']
            elif i[9]=='Dead':
                clinical[-1]=[i[0],int(i[11]),'Dead']
            else:
                pass
        else:
            if i[9]=='Alive':
                clinical.append([i[0],int(i[10]),'Alive'])
            elif i[9]=='Dead':
                clinical.append([i[0],int(i[11]),'Dead'])
            else:
                pass
    except:
        pass
## Removing the empty sentinel value used to seed the list.
clinical=clinical[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','clinical','nationwidechildrens.org_clinical_patient_coad.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## Bare except: rows with unparseable sex/age or day counts are skipped wholesale.
    try:
        ## [grade placeholder 0, sex code, age]; columns 6 and 53 are assumed to
        ## be gender and age at diagnosis -- TODO confirm against the file header.
        more_clinical[i[0]]=[0,sex_dict[i[6]],int(i[53])]
        if i[23]=='Alive':
            clinical4.append([i[0],int(i[24]),'Alive'])
        elif i[23]=='Dead':
            clinical4.append([i[0],int(i[25]),'Dead'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Index follow-up records by patient ID once, instead of rebuilding
## [j[0] for j in clinical] and calling .index() on every iteration (O(n^2)).
## setdefault keeps the FIRST occurrence, matching list.index() semantics.
follow_up_by_id={}
for record in clinical:
    follow_up_by_id.setdefault(record[0],record)
for i in clinical4:
    match=follow_up_by_id.get(i[0])
    if match is None:
        new_clinical.append(i)
    elif i[1]<=match[1]:
        ## The follow-up record has the longer (or equal) follow-up; keep it.
        new_clinical.append(match)
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## The seen-set grows as we append, matching the original's re-scan of
## new_clinical on each iteration (so duplicate IDs are added only once).
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
with open(os.path.join(BASE_DIR,'tcga_data','COAD','FILE_SAMPLE_MAP.txt')) as f:
    f.readline()  # skip header
    data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## First three '-'-separated barcode fields identify the patient
            ## (equivalent to the original zip/join construction).
            x='-'.join(i[1].split('-')[:3])
            ## setdefault appends in place instead of copying the list on
            ## every addition as .get(x,[])+[...] did.
            TCGA_to_mrna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() is Python-2-only; the "in" operator is equivalent and
    ## also works under Python 3.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        ## "with" closes every expression file (the original leaked the handles),
        ## and the comprehension variable no longer shadows the outer "i".
        with open(os.path.join(BASE_DIR,'tcga_data','COAD','mrna',j)) as f:
            f.readline()  # skip header
            ## Each row: gene identifier (second '|' field) and its normalized value.
            temp.append([[line.split('|')[1].split()[0],float(line.strip().split()[-1])] for line in f])
    if len(temp)==1:
        ## The patient has exactly one primary-tumor mRNA file.
        genes.append(temp[0])
    else:
        ## Multiple primary-tumor files: average the expression values gene-wise.
        values=[[entry[1] for entry in one_file] for one_file in temp]
        averaged=list(sum(np.array(v) for v in values)/float(len(temp)))
        ## list(zip(...)) keeps a concrete list under Python 3 as well.
        genes.append(list(zip([row[0] for row in temp[0]],averaged)))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[] for _ in genes] gives each patient an independent list; the original
## [[]]*len(genes) shared ONE list object (harmless only because entries were
## rebound, never mutated), and append() is O(1) vs re-concatenating the list.
final_genes=[[] for _ in genes]
for i in range(len(genes[0])):
    temp=[patient[i] for patient in genes]
    count=sum(1 for entry in temp if entry[1]==0)
    median=np.median([entry[1] for entry in temp])
    ## NOTE(review): len(genes)/4 is floor division under Python 2 (this script
    ## is Python 2 -- it uses dict.has_key elsewhere); the companion scripts use
    ## /4.0 -- confirm which cutoff is intended before changing it.
    if count<len(genes)/4 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index].append(kk)
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','COAD','final_genes.txt'),'w')
##for i in final_genes:
## f.write(str(i))
## f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        ## [time, vital status, 0, sex, age, expression value] per patient.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
    ## Parse the printed model summary for the 'gene' row: field 1 is the
    ## coefficient, the last field the p-value. The bare except skips lines too
    ## short to index (blank lines in the R printout).
    ## NOTE(review): if no 'gene' row is found, coeff/pvalue silently retain the
    ## PREVIOUS gene's values (or raise NameError on the first gene) -- consider
    ## resetting them to 'NA' before this parse loop.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Write the results to a tab-delimited file: gene name, Cox coefficient, p-value.
## "with" guarantees the handle is flushed and closed even if a write fails.
with open(os.path.join(BASE_DIR,'cox_regression','COAD','coeffs_pvalues.txt'),'w') as f:
    for gene_name, coeff_str, pvalue_str in zip(genes,coeffs,pvalues):
        ## Same byte output as the original six separate write() calls.
        f.write('%s\t%s\t%s\n' % (gene_name,coeff_str,pvalue_str))
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/COAD/cox_regression.py",
"copies": "1",
"size": "8485",
"license": "mit",
"hash": -6111629300363947000,
"line_mean": 33.3522267206,
"line_max": 143,
"alpha_frac": 0.6321744255,
"autogenerated": false,
"ratio": 3.1171932402645113,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42493676657645113,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in COAD Tier 3 data downloaded Jan. 5th, 2016
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Project root: four directory levels up from this script.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_coad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header lines.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## The digits-only regex filters placeholders such as '[Not Available]'.
    ## A death date takes precedence over a last-contact date here.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty sentinel value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','clinical','nationwidechildrens.org_clinical_patient_coad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## Bare except: patients with unparseable sex or age are skipped wholesale.
    try:
        ## [grade placeholder 0, sex code, age at diagnosis]
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Index follow-up records by patient ID once, instead of rebuilding
## [j[0] for j in clinical] and calling .index() on every iteration (O(n^2)).
## setdefault keeps the FIRST occurrence, matching list.index() semantics.
follow_up_by_id={}
for record in clinical:
    follow_up_by_id.setdefault(record[0],record)
for i in clinical4:
    match=follow_up_by_id.get(i[0])
    if match is None:
        new_clinical.append(i)
    elif i[1]<=match[1]:
        ## The follow-up record has the longer (or equal) follow-up; keep it.
        new_clinical.append(match)
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## The seen-set grows as we append, matching the original's re-scan of
## new_clinical on each iteration (so duplicate IDs are added only once).
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
with open(os.path.join(BASE_DIR,'tcga_data','COAD','FILE_SAMPLE_MAP_mrna.txt')) as f:
    f.readline()  # skip header
    data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## First three '-'-separated barcode fields identify the patient
            ## (equivalent to the original zip/join construction).
            x='-'.join(i[1].split('-')[:3])
            ## setdefault appends in place instead of copying the list on
            ## every addition as .get(x,[])+[...] did.
            TCGA_to_mrna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() is Python-2-only; the "in" operator is equivalent and
    ## also works under Python 3.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        ## "with" closes every expression file (the original leaked the handles),
        ## and the comprehension variable no longer shadows the outer "i".
        with open(os.path.join(BASE_DIR,'tcga_data','COAD','mrna',j)) as f:
            f.readline()  # skip header
            ## Each row: gene identifier (second '|' field) and its normalized value.
            temp.append([[line.split('|')[1].split()[0],float(line.strip().split()[-1])] for line in f])
    if len(temp)==1:
        ## The patient has exactly one primary-tumor mRNA file.
        genes.append(temp[0])
    else:
        ## Multiple primary-tumor files: average the expression values gene-wise.
        values=[[entry[1] for entry in one_file] for one_file in temp]
        averaged=list(sum(np.array(v) for v in values)/float(len(temp)))
        ## list(zip(...)) keeps a concrete list under Python 3 as well.
        genes.append(list(zip([row[0] for row in temp[0]],averaged)))
#### Only want genes that meet an expression cutoff
#### A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[] for _ in genes] gives each patient an independent list; the original
## [[]]*len(genes) shared ONE list object (harmless only because entries were
## rebound, never mutated), and append() is O(1) vs re-concatenating the list.
final_genes=[[] for _ in genes]
for i in range(len(genes[0])):
    temp=[patient[i] for patient in genes]
    count=sum(1 for entry in temp if entry[1]==0)
    median=np.median([entry[1] for entry in temp])
    ## /4.0 keeps the comparison in true division regardless of interpreter.
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index].append(kk)
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
## "with" closes the handle even if a write fails.
with open(os.path.join(BASE_DIR,'mrna','cox','COAD','final_genes.txt'),'w') as f:
    for i in final_genes:
        f.write(str(i))
        f.write('\n')
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        ## [time, vital status, 0, sex, age, expression value] per patient.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
    ## Parse the printed model summary for the 'gene' row: field 1 is the
    ## coefficient, the last field the p-value. The bare except skips lines too
    ## short to index (blank lines in the R printout).
    ## NOTE(review): if no 'gene' row is found, coeff/pvalue silently retain the
    ## PREVIOUS gene's values (or raise NameError on the first gene) -- consider
    ## resetting them to 'NA' before this parse loop.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Write the results to a tab-delimited file: gene name, Cox coefficient, p-value.
## "with" guarantees the handle is flushed and closed even if a write fails.
with open(os.path.join(BASE_DIR,'mrna','cox','COAD','coeffs_pvalues.txt'),'w') as f:
    for gene_name, coeff_str, pvalue_str in zip(genes,coeffs,pvalues):
        ## Same byte output as the original six separate write() calls.
        f.write('%s\t%s\t%s\n' % (gene_name,coeff_str,pvalue_str))
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/COAD/cox_regression.py",
"copies": "1",
"size": "9249",
"license": "mit",
"hash": -4718978002478678000,
"line_mean": 35.2705882353,
"line_max": 143,
"alpha_frac": 0.650556817,
"autogenerated": false,
"ratio": 3.102650117410265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9135822977684482,
"avg_score": 0.02347679134515655,
"num_lines": 255
} |
## A script for finding every cox coefficient and pvalue for every mRNA in ESCA Tier 3 data downloaded Jan 5th 2016
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Project root: four directory levels up from this script.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were two clinical files with nonredundant data. V4.0 was found to be most up to date.
## Both files are loaded with the more up to date file getting preference
f=open(os.path.join(BASE_DIR,'tcga_data','ESCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_esca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header lines.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## The digits-only regex filters placeholders such as '[Not Available]'.
    ## NOTE(review): here last-contact is checked BEFORE the death date (the
    ## COAD/GBM scripts check death first) -- confirm the intended precedence.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty sentinel value.
clinical=clinical1[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
clinical4=[]
f=open(os.path.join(BASE_DIR,'tcga_data','ESCA','clinical','nationwidechildrens.org_clinical_patient_esca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
for i in data:
    ## Bare except: patients lacking a mapped grade (e.g. 'GX'), sex, or a
    ## parseable age are skipped wholesale.
    try:
        ## [grade code, sex code, age at diagnosis]
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Index follow-up records by patient ID once, instead of rebuilding
## [j[0] for j in clinical] and calling .index() on every iteration (O(n^2)).
## setdefault keeps the FIRST occurrence, matching list.index() semantics.
follow_up_by_id={}
for record in clinical:
    follow_up_by_id.setdefault(record[0],record)
for i in clinical4:
    match=follow_up_by_id.get(i[0])
    if match is None:
        new_clinical.append(i)
    elif i[1]<=match[1]:
        ## The follow-up record has the longer (or equal) follow-up; keep it.
        new_clinical.append(match)
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## The seen-set grows as we append, matching the original's re-scan of
## new_clinical on each iteration (so duplicate IDs are added only once).
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
with open(os.path.join(BASE_DIR,'tcga_data','ESCA','FILE_SAMPLE_MAP_mrna.txt')) as f:
    f.readline()  # skip header
    data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## First three '-'-separated barcode fields identify the patient
            ## (equivalent to the original zip/join construction).
            x='-'.join(i[1].split('-')[:3])
            ## setdefault appends in place instead of copying the list on
            ## every addition as .get(x,[])+[...] did.
            TCGA_to_mrna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() is Python-2-only; the "in" operator is equivalent and
    ## also works under Python 3.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        ## "with" closes every expression file (the original leaked the handles),
        ## and the comprehension variable no longer shadows the outer "i".
        with open(os.path.join(BASE_DIR,'tcga_data','ESCA','mrna',j)) as f:
            f.readline()  # skip header
            ## Each row: gene identifier (second '|' field) and its normalized value.
            temp.append([[line.split('|')[1].split()[0],float(line.strip().split()[-1])] for line in f])
    if len(temp)==1:
        ## The patient has exactly one primary-tumor mRNA file.
        genes.append(temp[0])
    else:
        ## Multiple primary-tumor files: average the expression values gene-wise.
        values=[[entry[1] for entry in one_file] for one_file in temp]
        averaged=list(sum(np.array(v) for v in values)/float(len(temp)))
        ## list(zip(...)) keeps a concrete list under Python 3 as well.
        genes.append(list(zip([row[0] for row in temp[0]],averaged)))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[] for _ in genes] gives each patient an independent list; the original
## [[]]*len(genes) shared ONE list object (harmless only because entries were
## rebound, never mutated), and append() is O(1) vs re-concatenating the list.
final_genes=[[] for _ in genes]
for i in range(len(genes[0])):
    temp=[patient[i] for patient in genes]
    count=sum(1 for entry in temp if entry[1]==0)
    median=np.median([entry[1] for entry in temp])
    ## /4.0 keeps the comparison in true division regardless of interpreter.
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index].append(kk)
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
## "with" closes the handle even if a write fails.
with open(os.path.join(BASE_DIR,'mrna','cox','ESCA','final_genes.txt'),'w') as f:
    for i in final_genes:
        f.write(str(i))
        f.write('\n')
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])): ## These lists contain the clinical information and mRNA data in the same order.
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes):
        ## [time, vital status, grade, sex, age, expression value] per patient.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## One-hot indicator columns for tumor grade (kaplan[..][2] holds the grade code).
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    ## NOTE(review): sex is loaded into R but omitted from the model formula
    ## below -- confirm that this is intended.
    res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
    # Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if no 'gene' row is found, coeff/pvalue silently retain the
    ## PREVIOUS gene's values (or raise NameError on the first gene) -- consider
    ## resetting them to 'NA' before this parse loop.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Write the results to a tab-delimited file: gene name, Cox coefficient, p-value.
## "with" guarantees the handle is flushed and closed even if a write fails.
with open(os.path.join(BASE_DIR,'mrna','cox','ESCA','coeffs_pvalues.txt'),'w') as f:
    for gene_name, coeff_str, pvalue_str in zip(genes,coeffs,pvalues):
        ## Same byte output as the original six separate write() calls.
        f.write('%s\t%s\t%s\n' % (gene_name,coeff_str,pvalue_str))
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/ESCA/cox_regression.py",
"copies": "1",
"size": "10196",
"license": "mit",
"hash": 3291891384934671400,
"line_mean": 33.7986348123,
"line_max": 142,
"alpha_frac": 0.6432914869,
"autogenerated": false,
"ratio": 3.119951040391677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4263242527291677,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in GBM Tier 3 data downloaded Feb. 2015
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Project root: three directory levels up from this script.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
## Load the TCGA GBM follow-up file; the first three lines are headers.
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_gbm.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical=[['','','']]
for i in data:
    ## NOTE(review): unlike the companion COAD script there is no try/except
    ## here, so a non-integer day count (e.g. '[Not Available]') would raise
    ## ValueError and abort the script -- confirm the input is always clean.
    if clinical[-1][0]==i[0]:
        ## Same patient as the previous row: overwrite with the newer record.
        if i[9]=='Alive':
            clinical[-1]=[i[0],int(i[10]),'Alive']
        elif i[9]=='Dead':
            clinical[-1]=[i[0],int(i[11]),'Dead']
        else:
            pass
    else:
        if i[9]=='Alive':
            clinical.append([i[0],int(i[10]),'Alive'])
        elif i[9]=='Dead':
            clinical.append([i[0],int(i[11]),'Dead'])
        else:
            pass
## Removing the empty sentinel value.
clinical=clinical[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','clinical','nationwidechildrens.org_clinical_patient_gbm.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## NOTE(review): no try/except here (the companion scripts have one); a row
    ## with unparseable sex/age crashes the script instead of being skipped.
    ## Columns 6/26 are assumed to be gender and age; 15/16/17 vital status /
    ## days to last contact / days to death -- TODO confirm against the header.
    more_clinical[i[0]]=[0,sex_dict[i[6]],int(i[26])]
    if i[15]=='Alive':
        clinical4.append([i[0],int(i[16]),'Alive'])
    elif i[15]=='Dead':
        clinical4.append([i[0],int(i[17]),'Dead'])
    else:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Index follow-up records by patient ID once, instead of rebuilding
## [j[0] for j in clinical] and calling .index() on every iteration (O(n^2)).
## setdefault keeps the FIRST occurrence, matching list.index() semantics.
follow_up_by_id={}
for record in clinical:
    follow_up_by_id.setdefault(record[0],record)
for i in clinical4:
    match=follow_up_by_id.get(i[0])
    if match is None:
        new_clinical.append(i)
    elif i[1]<=match[1]:
        ## The follow-up record has the longer (or equal) follow-up; keep it.
        new_clinical.append(match)
    else:
        new_clinical.append(i)
## NOTE(review): unlike the companion scripts, this file does NOT add back
## follow-up patients that are missing from clinical4; behavior preserved as-is.
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
with open(os.path.join(BASE_DIR,'tcga_data','GBM','FILE_SAMPLE_MAP.txt')) as f:
    f.readline()  # skip header
    data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## First three '-'-separated barcode fields identify the patient
            ## (equivalent to the original zip/join construction).
            x='-'.join(i[1].split('-')[:3])
            ## setdefault appends in place instead of copying the list on
            ## every addition as .get(x,[])+[...] did.
            TCGA_to_mrna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() is Python-2-only; the "in" operator is equivalent and
    ## also works under Python 3.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        ## "with" closes every expression file (the original leaked the handles),
        ## and the comprehension variable no longer shadows the outer "i".
        with open(os.path.join(BASE_DIR,'tcga_data','GBM','mrna',j)) as f:
            f.readline()  # skip header
            ## Each row: gene identifier (second '|' field) and its normalized value.
            temp.append([[line.split('|')[1].split()[0],float(line.strip().split()[-1])] for line in f])
    if len(temp)==1:
        ## The patient has exactly one primary-tumor mRNA file.
        genes.append(temp[0])
    else:
        ## Multiple primary-tumor files: average the expression values gene-wise.
        values=[[entry[1] for entry in one_file] for one_file in temp]
        averaged=list(sum(np.array(v) for v in values)/float(len(temp)))
        ## list(zip(...)) keeps a concrete list under Python 3 as well.
        genes.append(list(zip([row[0] for row in temp[0]],averaged)))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[] for _ in genes] gives each patient an independent list; the original
## [[]]*len(genes) shared ONE list object (harmless only because entries were
## rebound, never mutated), and append() is O(1) vs re-concatenating the list.
final_genes=[[] for _ in genes]
for i in range(len(genes[0])):
    temp=[patient[i] for patient in genes]
    count=sum(1 for entry in temp if entry[1]==0)
    median=np.median([entry[1] for entry in temp])
    ## NOTE(review): len(genes)/4 is floor division under Python 2 (this script
    ## is Python 2 -- it uses dict.has_key elsewhere); the companion scripts use
    ## /4.0 -- confirm which cutoff is intended before changing it.
    if count<len(genes)/4 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index].append(kk)
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','GBM','final_genes.txt'),'w')
##for i in final_genes:
## f.write(str(i))
## f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        ## [time, vital status, 0, sex, age, expression value] per patient.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
    ## Parse the printed model summary for the 'gene' row: field 1 is the
    ## coefficient, the last field the p-value. The bare except skips lines too
    ## short to index (blank lines in the R printout).
    ## NOTE(review): if no 'gene' row is found, coeff/pvalue silently retain the
    ## PREVIOUS gene's values (or raise NameError on the first gene) -- consider
    ## resetting them to 'NA' before this parse loop.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Write the results to a tab-delimited file: gene name, Cox coefficient, p-value.
## "with" guarantees the handle is flushed and closed even if a write fails.
with open(os.path.join(BASE_DIR,'cox_regression','GBM','coeffs_pvalues.txt'),'w') as f:
    for gene_name, coeff_str, pvalue_str in zip(genes,coeffs,pvalues):
        ## Same byte output as the original six separate write() calls.
        f.write('%s\t%s\t%s\n' % (gene_name,coeff_str,pvalue_str))
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/GBM/cox_regression.py",
"copies": "1",
"size": "8147",
"license": "mit",
"hash": 761838528711713800,
"line_mean": 33.5211864407,
"line_max": 142,
"alpha_frac": 0.6407266478,
"autogenerated": false,
"ratio": 3.0848163574403635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42255430052403636,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in GBM Tier 3 data downloaded Jan. 5th, 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Project root: four os.path.dirname calls walk up from mrna/cox/GBM/cox_regression.py.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_gbm.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header lines.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        ## '^[0-9]+$' accepts only a nonnegative integer day count; anything
        ## else (e.g. '[Not Available]') leaves the record unchanged.
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','clinical','nationwidechildrens.org_clinical_patient_gbm.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        ## Leading 0 is a placeholder where the sibling HNSC script stores
        ## tumor grade — GBM has no grade covariate here.
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        ## Bare except: rows missing sex or age (KeyError/ValueError) are skipped.
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        ## Keep whichever record shows the longer follow-up time.
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        ## 4th dash-separated barcode field minus its trailing letter is the sample-type code.
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the patient barcode from the first three dash-separated fields.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key is Python 2 only; this script targets Python 2.
    if TCGA_to_mrna.has_key(i[0]):
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','GBM','mrna',j))
        f.readline()
        ## NOTE(review): the comprehension variable 'i' shadows the outer loop
        ## variable (Python 2 leaks it). Harmless only because the iterator over
        ## i[-1] was created before the rebinding — fragile; do not reorder.
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## zip returns a list in Python 2; the result is indexed later (j[i][1]).
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## final_genes starts as N references to one shared empty list; safe because the
## update below rebinds each slot with + rather than mutating in place.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mrna','cox','GBM','final_genes.txt'),'w')
for i in final_genes:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if no 'gene' row is found, coeff/pvalue silently retain the
    ## previous iteration's values (or are undefined on the very first gene).
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            ## Blank lines make entry.split()[0] raise IndexError; skip them.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mrna','cox','GBM','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/GBM/cox_regression.py",
"copies": "1",
"size": "9246",
"license": "mit",
"hash": -1008117323300189300,
"line_mean": 34.8372093023,
"line_max": 142,
"alpha_frac": 0.6519576033,
"autogenerated": false,
"ratio": 3.103726082578046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4255683685878046,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in HNSC Tier 3 data downloaded Feb. 2015
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Project root: three os.path.dirname calls walk up from cox_regression/HNSC/cox_regression.py.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
## There were two clinical files with nonredundant data. V4.8 was found to be most up to date.
## Both files are loaded with the more up to date file getting preference.
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','clinical','nationwidechildrens.org_clinical_follow_up_v4.8_hnsc.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## NOTE(review): columns are addressed by hard-coded position (0=barcode,
## 8=vital status, 9=last contact days, 10=death days — inferred by comparison
## with the header-indexed versions of these scripts; verify against the file).
clinical1=[['','','']]
for i in data:
    try:
        if clinical1[-1][0]==i[0]:
            if i[8]=='Alive':
                clinical1[-1]=[i[0],int(i[9]),'Alive']
            elif i[8]=='Dead':
                clinical1[-1]=[i[0],int(i[10]),'Dead']
            else:
                pass
        else:
            if i[8]=='Alive':
                clinical1.append([i[0],int(i[9]),'Alive'])
            elif i[8]=='Dead':
                clinical1.append([i[0],int(i[10]),'Dead'])
            else:
                pass
    except:
        ## Bare except: rows with non-numeric day fields are skipped.
        pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_hnsc.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## Same de-duplication as above, restricted to patients absent from the v4.8 file.
clinical2=[['','','']]
for i in data:
    if i[0] not in [j[0] for j in clinical]:
        try:
            if clinical2[-1][0]==i[0]:
                if i[8]=='Alive':
                    clinical2[-1]=[i[0],int(i[9]),'Alive']
                elif i[8]=='Dead':
                    clinical2[-1]=[i[0],int(i[10]),'Dead']
                else:
                    pass
            else:
                if i[8]=='Alive':
                    clinical2.append([i[0],int(i[9]),'Alive'])
                elif i[8]=='Dead':
                    clinical2.append([i[0],int(i[10]),'Dead'])
                else:
                    pass
        except:
            pass
##merging data and removing the empty value
clinical+=clinical2[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','clinical','nationwidechildrens.org_clinical_patient_hnsc.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    try:
        ## Hard-coded columns: 35=tumor grade, 8=gender, 51=age at diagnosis,
        ## 25=vital status, 26=last contact days, 27=death days — TODO confirm.
        more_clinical[i[0]]=[grade_dict[i[35]],sex_dict[i[8]],int(i[51])]
        if i[25]=='Alive':
            clinical4.append([i[0],int(i[26]),'Alive'])
        elif i[25]=='Dead':
            clinical4.append([i[0],int(i[27]),'Dead'])
        else:
            pass
    except:
        ## Rows missing grade, sex, or age are skipped.
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        ## Keep whichever record shows the longer follow-up time.
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        ## 4th dash-separated barcode field minus its trailing letter is the sample-type code.
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the patient barcode from the first three dash-separated fields.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key is Python 2 only; this script targets Python 2.
    if TCGA_to_mrna.has_key(i[0]):
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','mrna',j))
        f.readline()
        ## NOTE(review): the comprehension variable 'i' shadows the outer loop
        ## variable (Python 2 leaks it); harmless only because the iterator over
        ## i[-1] was created before the rebinding — do not reorder.
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## zip returns a list in Python 2; the result is indexed later (j[i][1]).
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## final_genes starts as N references to one shared empty list; safe because the
## update below rebinds each slot with + rather than mutating in place.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## NOTE(review): len(genes)/4 is integer (floor) division in Python 2; the
    ## Jan. 2016 versions of these scripts use /4.0, so the cutoff differs slightly.
    if count<len(genes)/4 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','HNSC','final_genes.txt'),'w')
##for i in final_genes:
##    f.write(str(i))
##    f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## One 0/1 indicator list per tumor grade (dummy coding for the Cox model).
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ##grade4
    grade4=[]
    for ii in kaplan:
        if ii[2]==4:
            grade4.append(1)
        else:
            grade4.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
    # Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if no 'gene' row is parsed, coeff/pvalue retain the previous
    ## iteration's values (or are undefined on the very first gene).
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            ## Blank lines make entry.split()[0] raise IndexError; skip them.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'cox_regression','HNSC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/HNSC/cox_regression.py",
"copies": "1",
"size": "10573",
"license": "mit",
"hash": -9125553782175850000,
"line_mean": 31.8354037267,
"line_max": 143,
"alpha_frac": 0.6070178757,
"autogenerated": false,
"ratio": 3.1188790560471977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4225896931747198,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in HNSC Tier 3 data downloaded Jan 5th, 2016
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Project root: four os.path.dirname calls walk up from mrna/cox/HNSC/cox_regression.py.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were two clinical files with nonredundant data. V4.8 was found to be most up to date.
## Both files are loaded with the more up to date file getting preference.
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','clinical','nationwidechildrens.org_clinical_follow_up_v4.8_hnsc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header lines.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        ## '^[0-9]+$' accepts only a nonnegative integer day count.
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_hnsc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## Same de-duplication as above, restricted to patients absent from the v4.8 file.
clinical2=[['','','']]
for i in data:
    if i[patient_column] not in [j[0] for j in clinical]:
        if clinical2[-1][0]==i[patient_column]:
            if re.search('^[0-9]+$',i[death_column]):
                clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
            elif re.search('^[0-9]+$',i[alive_column]):
                clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
            else:
                pass
        else:
            if re.search('^[0-9]+$',i[death_column]):
                clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
            elif re.search('^[0-9]+$',i[alive_column]):
                clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
            else:
                pass
##merging data and removing the empty value
clinical+=clinical2[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','clinical','nationwidechildrens.org_clinical_patient_hnsc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        ## Bare except: rows missing grade, sex, or age are skipped.
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        ## Keep whichever record shows the longer follow-up time.
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        ## 4th dash-separated barcode field minus its trailing letter is the sample-type code.
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the patient barcode from the first three dash-separated fields.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key is Python 2 only; this script targets Python 2.
    if TCGA_to_mrna.has_key(i[0]):
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','HNSC','mrna',j))
        f.readline()
        ## NOTE(review): the comprehension variable 'i' shadows the outer loop
        ## variable (Python 2 leaks it); harmless only because the iterator over
        ## i[-1] was created before the rebinding — do not reorder.
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## zip returns a list in Python 2; the result is indexed later (j[i][1]).
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## final_genes starts as N references to one shared empty list; safe because the
## update below rebinds each slot with + rather than mutating in place.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mrna','cox','HNSC','final_genes.txt'),'w')
for i in final_genes:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## One 0/1 indicator list per tumor grade (dummy coding for the Cox model).
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ##grade4
    grade4=[]
    for ii in kaplan:
        if ii[2]==4:
            grade4.append(1)
        else:
            grade4.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
    # Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if no 'gene' row is parsed, coeff/pvalue retain the previous
    ## iteration's values (or are undefined on the very first gene).
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            ## Blank lines make entry.split()[0] raise IndexError; skip them.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mrna','cox','HNSC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/HNSC/cox_regression.py",
"copies": "1",
"size": "11703",
"license": "mit",
"hash": 3601466821139916000,
"line_mean": 34.1441441441,
"line_max": 143,
"alpha_frac": 0.6341109117,
"autogenerated": false,
"ratio": 3.1034208432776453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9107616126287287,
"avg_score": 0.025983125738071686,
"num_lines": 333
} |
## A script for finding every cox coefficient and pvalue for every mRNA in KIRC Tier 3 data downloaded Feb. 2015
## NOTE(review): all leading indentation was lost in this copy; block structure was
## reconstructed from the duplicated sibling scripts in this same file.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')

##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_kirc.txt'))
## Skip the three header lines of the TCGA clinical file.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]

## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical=[['','','']]
for i in data:
    try:
        if clinical[-1][0]==i[0]:
            if i[8]=='Alive':
                clinical[-1]=[i[0],int(i[9]),'Alive']
            elif i[8]=='Dead':
                clinical[-1]=[i[0],int(i[10]),'Dead']
            else:
                pass
        else:
            if i[8]=='Alive':
                clinical.append([i[0],int(i[9]),'Alive'])
            elif i[8]=='Dead':
                clinical.append([i[0],int(i[10]),'Dead'])
            else:
                pass
    except:
        ## Best effort: rows with missing/malformed day counts are skipped.
        pass

## Removing the empty value.
clinical=clinical[1:]

## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4

sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1

## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_patient_kirc.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        ## Hard-coded column positions (i[4], i[8], i[-16], i[24..26]) match this
        ## particular TCGA file version -- TODO confirm if the data is refreshed.
        more_clinical[i[0]]=[grade_dict[i[4]],sex_dict[i[8]],int(i[-16])]
        if i[24]=='Alive':
            clinical4.append([i[0],int(i[25]),'Alive'])
        elif i[24]=='Dead':
            clinical4.append([i[0],int(i[26]),'Dead'])
        else:
            pass
    except:
        pass

new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)

##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]

final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])

## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']

## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the 'TCGA-XX-YYYY' patient barcode from the sample barcode.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]

clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' replaces the Python-2-only dict.has_key (same behavior, also valid Python 3).
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass

## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','mrna',j))
        f.readline()
        ## 'line' (not 'i') so the comprehension cannot clobber the outer loop variable.
        temp.append([[line.split('|')[1].split()[0],float(line.strip().split()[-1])] for line in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))

## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[]]*len(genes) aliases a single empty list, but entries are only ever rebound
## (final_genes[index]=final_genes[index]+[kk] builds a new list), so this is harmless.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## NOTE(review): len(genes)/4 is integer division under Python 2; the sibling
    ## onco_lnc script uses /4.0 -- behavior kept as-is here.
    if count<len(genes)/4 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]

## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','KIRC','final_genes.txt'),'w')
##for i in final_genes:
##    f.write(str(i))
##    f.write('\n')
##f.close()

##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1

coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names

for i in range(len(final_genes[0])): ## These lists contain the clinical information and mRNA data in the same order.
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes):
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## One-hot indicator vectors for tumor grade (kaplan row index 2 holds the numeric grade).
    grade1=[1 if ii[2]==1 else 0 for ii in kaplan]
    grade2=[1 if ii[2]==2 else 0 for ii in kaplan]
    grade3=[1 if ii[2]==3 else 0 for ii in kaplan]
    grade4=[1 if ii[2]==4 else 0 for ii in kaplan]
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'cox_regression','KIRC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/KIRC/cox_regression.py",
"copies": "1",
"size": "9445",
"license": "mit",
"hash": -7048869404970509000,
"line_mean": 31.7951388889,
"line_max": 142,
"alpha_frac": 0.6208575966,
"autogenerated": false,
"ratio": 3.1079302402105955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9095387787475149,
"avg_score": 0.026680009867089464,
"num_lines": 288
} |
## A script for finding every cox coefficient and pvalue for every mRNA in KIRC Tier 3 data downloaded Jan. 5th, 2016
## NOTE(review): all leading indentation was lost in this copy; block structure was
## reconstructed from the duplicated sibling scripts in this same file.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re

##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_kirc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]

## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        ## Death takes precedence over last-contact when both fields hold a day count.
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass

## Removing the empty value.
clinical=clinical1[1:]

## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4

sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1

## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_patient_kirc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        ## Best effort: rows missing grade/sex/age are skipped.
        pass

new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)

##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]

final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])

## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']

## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the 'TCGA-XX-YYYY' patient barcode from the sample barcode.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]

clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' replaces the Python-2-only dict.has_key (same behavior, also valid Python 3).
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass

## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','mrna',j))
        f.readline()
        ## 'line' (not 'i') so the comprehension cannot clobber the outer loop variable.
        temp.append([[line.split('|')[1].split()[0],float(line.strip().split()[-1])] for line in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))

## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[]]*len(genes) aliases a single empty list, but entries are only ever rebound
## (final_genes[index]=final_genes[index]+[kk] builds a new list), so this is harmless.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]

## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mrna','cox','KIRC','final_genes.txt'),'w')
for i in final_genes:
    f.write(str(i))
    f.write('\n')
f.close()

##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1

coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names

for i in range(len(final_genes[0])): ## These lists contain the clinical information and mRNA data in the same order.
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes):
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## One-hot indicator vectors for tumor grade (kaplan row index 2 holds the numeric grade).
    grade1=[1 if ii[2]==1 else 0 for ii in kaplan]
    grade2=[1 if ii[2]==2 else 0 for ii in kaplan]
    grade3=[1 if ii[2]==3 else 0 for ii in kaplan]
    grade4=[1 if ii[2]==4 else 0 for ii in kaplan]
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
    # Parse the string of the result with python for the gene coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mrna','cox','KIRC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/KIRC/cox_regression.py",
"copies": "1",
"size": "10258",
"license": "mit",
"hash": -3638448818155716000,
"line_mean": 33.6554054054,
"line_max": 142,
"alpha_frac": 0.6396958471,
"autogenerated": false,
"ratio": 3.095353047676524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4235048894776524,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in KIRP Tier 3 data downloaded Feb. 2015
## NOTE(review): all leading indentation was lost in this copy; block structure was
## reconstructed from the duplicated sibling scripts in this same file.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')

##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

f=open(os.path.join(BASE_DIR,'tcga_data','KIRP','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_kirp.txt'))
## Skip the three header lines of the TCGA clinical file.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]

## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    try:
        if clinical1[-1][0]==i[0]:
            if i[8]=='Alive':
                clinical1[-1]=[i[0],int(i[9]),'Alive']
            elif i[8]=='Dead':
                clinical1[-1]=[i[0],int(i[10]),'Dead']
            else:
                pass
        else:
            if i[8]=='Alive':
                clinical1.append([i[0],int(i[9]),'Alive'])
            elif i[8]=='Dead':
                clinical1.append([i[0],int(i[10]),'Dead'])
            else:
                pass
    except:
        ## Best effort: rows with missing/malformed day counts are skipped.
        pass

## Removing the empty value.
clinical=clinical1[1:]

## Sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1

## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','KIRP','clinical','nationwidechildrens.org_clinical_patient_kirp.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    try:
        ## KIRP has no grade covariate; 0 is used as a placeholder in slot 0.
        more_clinical[i[0]]=[0,sex_dict[i[8]],int(i[17])]
        if i[30]=='Alive':
            clinical4.append([i[0],int(i[31]),'Alive'])
        elif i[30]=='Dead':
            clinical4.append([i[0],int(i[32]),'Dead'])
        else:
            pass
    except:
        pass

new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)

##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]

final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])

f=open(os.path.join(BASE_DIR,'tcga_data','KIRP','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']

## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the 'TCGA-XX-YYYY' patient barcode from the sample barcode.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]

clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' replaces the Python-2-only dict.has_key (same behavior, also valid Python 3).
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass

## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','KIRP','mrna',j))
        f.readline()
        ## 'line' (not 'i') so the comprehension cannot clobber the outer loop variable.
        temp.append([[line.split('|')[1].split()[0],float(line.strip().split()[-1])] for line in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))

## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[]]*len(genes) aliases a single empty list, but entries are only ever rebound
## (final_genes[index]=final_genes[index]+[kk] builds a new list), so this is harmless.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## NOTE(review): len(genes)/4 is integer division under Python 2; the sibling
    ## onco_lnc script uses /4.0 -- behavior kept as-is here.
    if count<len(genes)/4 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]

## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','KIRP','final_genes.txt'),'w')
##for i in final_genes:
##    f.write(str(i))
##    f.write('\n')
##f.close()

##
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1

coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names

for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'cox_regression','KIRP','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/KIRP/cox_regression.py",
"copies": "1",
"size": "8289",
"license": "mit",
"hash": -6886103651173132000,
"line_mean": 33.1111111111,
"line_max": 143,
"alpha_frac": 0.6297502714,
"autogenerated": false,
"ratio": 3.0814126394052046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4211162910805204,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in KIRP Tier 3 data downloaded Jan. 5th, 2016
## NOTE(review): all leading indentation was lost in this copy; block structure was
## reconstructed from the duplicated sibling scripts in this same file.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re

##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

f=open(os.path.join(BASE_DIR,'tcga_data','KIRP','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_kirp.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]

## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        ## Last-contact takes precedence over death in this script's ordering.
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass

## Removing the empty value.
clinical=clinical1[1:]

## Sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1

## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','KIRP','clinical','nationwidechildrens.org_clinical_patient_kirp.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    try:
        ## KIRP has no grade covariate; 0 is used as a placeholder in slot 0.
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        ## Best effort: rows missing sex/age are skipped.
        pass

new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)

##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]

final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])

f=open(os.path.join(BASE_DIR,'tcga_data','KIRP','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']

## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the 'TCGA-XX-YYYY' patient barcode from the sample barcode.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]

clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## 'in' replaces the Python-2-only dict.has_key (same behavior, also valid Python 3).
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass

## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','KIRP','mrna',j))
        f.readline()
        ## 'line' (not 'i') so the comprehension cannot clobber the outer loop variable.
        temp.append([[line.split('|')[1].split()[0],float(line.strip().split()[-1])] for line in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))

## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[]]*len(genes) aliases a single empty list, but entries are only ever rebound
## (final_genes[index]=final_genes[index]+[kk] builds a new list), so this is harmless.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]

## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mrna','cox','KIRP','final_genes.txt'),'w')
for i in final_genes:
    f.write(str(i))
    f.write('\n')
f.close()

death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1

coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names

for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mrna','cox','KIRP','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/KIRP/cox_regression.py",
"copies": "1",
"size": "9021",
"license": "mit",
"hash": 8307401601167608000,
"line_mean": 35.2289156627,
"line_max": 143,
"alpha_frac": 0.6485977164,
"autogenerated": false,
"ratio": 3.074642126789366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.910299631955338,
"avg_score": 0.02404870472719711,
"num_lines": 249
} |
## A script for finding every cox coefficient and pvalue for every mRNA in LAML Tier 3 data downloaded Feb. 2015
## Load necessary modules
## NOTE(review): all leading indentation was lost in this copy; block structure was
## reconstructed from the duplicated sibling scripts in this same file.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')

##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

##LAML did not contain a follow_up file, which might explain why cbioportal doesn't allow kaplans for this cancer
##However there was a lot of clinical data in the clinical_patient file
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
clinical=[]
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1

f=open(os.path.join(BASE_DIR,'tcga_data','LAML','clinical','nationwidechildrens.org_clinical_patient_laml.txt'))
## Skip the three header lines of the TCGA clinical file.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
for i in data:
    try:
        ## LAML has no grade covariate; 0 is used as a placeholder in slot 0.
        more_clinical[i[0]]=[0,sex_dict[i[4]],int(i[24])]
        if i[15]=='Alive':
            clinical.append([i[0],int(i[16]),'Alive'])
        elif i[15]=='Dead':
            clinical.append([i[0],int(i[17]),'Dead'])
        else:
            pass
    except:
        ## Best effort: rows with missing/malformed fields are skipped.
        pass

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in clinical if i[1]>0]

final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])

## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LAML','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']

## 03 indicates a primary blood tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='03':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
temp=[]
for j in i[-1]:
f=open(os.path.join(BASE_DIR,'tcga_data','LAML','mrna',j))
f.readline()
temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
## In the case that the patient only contained 1 primary tumor mRNA file.
if len(temp)==1:
genes.append(temp[0])
## If the patient contained more than 1 primary tumor mRNA file
## this list comprehension will average the files for any number of files.
else:
values=[]
for k in temp:
values.append([kk[1] for kk in k])
genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## Builds final_genes: one list per patient (same order as clinical_and_files), each holding
## only the [gene, value] pairs for genes that passed the cutoff.
## The shared empty list from [[]]*len(genes) is safe here because elements are rebound
## (final_genes[index]=final_genes[index]+[kk]), never mutated in place.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## BUGFIX: use float division. Under Python 2, len(genes)/4 truncates, which made the
    ## zero-expression cutoff stricter than the stated "no more than a fourth of the patients".
    ## The author's later revision of this script uses /4.0 as well.
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','LAML','final_genes.txt'),'w')
##for i in final_genes:
##    f.write(str(i))
##    f.write('\n')
##f.close()
## Map vital status strings to the event indicator expected by Surv()/coxph (0=censored, 1=event)
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
## Run one Cox proportional hazards regression per gene that passed the expression filter
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        ## kaplan rows: [time, vital status, 0, sex, age, expression value for this gene]
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if no 'gene' row is found in the R output, coeff/pvalue silently keep
    ## the values from the previous iteration and get re-appended below -- confirm intended.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## Output format: one gene per line, "<gene>\t<coefficient>\t<pvalue>\n".
f=open(os.path.join(BASE_DIR,'cox_regression','LAML','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/LAML/cox_regression.py",
"copies": "1",
"size": "6524",
"license": "mit",
"hash": -7133557067043312000,
"line_mean": 33.5185185185,
"line_max": 143,
"alpha_frac": 0.6434702636,
"autogenerated": false,
"ratio": 3.1007604562737643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42442307198737644,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in LAML Tier 3 data downloaded Jan. 5th, 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')  # load R's survival package; coxph() is used further below
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
##LAML did not contain a follow_up file, which might explain why cbioportal didn't allow kaplans for this cancer for a period of time.
##However there was a lot of clinical data in the clinical_patient file
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
clinical4=[]
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
f=open(os.path.join(BASE_DIR,'tcga_data','LAML','clinical','nationwidechildrens.org_clinical_patient_laml.txt'))
##get the column indexes needed
## (Unlike the earlier revision of this script, columns are located by header name,
## so this is robust to column reordering in re-downloaded clinical files.)
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
for i in data:
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        ## A purely numeric field distinguishes real day counts from placeholders
        ## like '[Not Available]'; last-contact is preferred over death when both match.
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        ## Rows with missing or malformed sex/age fields are silently skipped
        pass
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in clinical4 if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LAML','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 03 indicates a primary blood tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='03':
            ## Rebuild the patient barcode from the first three '-'-separated fields of the sample barcode
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            ## A patient can map to several mRNA files; accumulate them in a list
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    if TCGA_to_mrna.has_key(i[0]):  # NOTE(review): dict.has_key() is Python 2 only -- this script targets Python 2
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','LAML','mrna',j))
        f.readline()
        ## Each entry is [gene symbol (from the 'symbol|id' column), normalized RSEM value (last column)]
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## The shared empty list from [[]]*len(genes) is safe: elements are rebound below, not mutated.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## Float division keeps the "no more than a fourth" cutoff exact under Python 2
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mrna','cox','LAML','final_genes.txt'),'w')
for i in final_genes:
    f.write(str(i))
    f.write('\n')
f.close()
## Map vital status strings to the event indicator expected by Surv()/coxph (0=censored, 1=event)
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
## Run one Cox proportional hazards regression per gene that passed the expression filter
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        ## kaplan rows: [time, vital status, 0, sex, age, expression value for this gene]
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if no 'gene' row is found in the R output, coeff/pvalue silently keep
    ## the values from the previous iteration and get re-appended below -- confirm intended.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## Output format: one gene per line, "<gene>\t<coefficient>\t<pvalue>\n".
f=open(os.path.join(BASE_DIR,'mrna','cox','LAML','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/LAML/cox_regression.py",
"copies": "1",
"size": "6971",
"license": "mit",
"hash": -609931933921508900,
"line_mean": 34.5663265306,
"line_max": 143,
"alpha_frac": 0.6522737054,
"autogenerated": false,
"ratio": 3.099599822143175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42518735275431746,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in LGG Tier 3 data downloaded Feb. 2015
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')  # load R's survival package; coxph() is used further below
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lgg.txt'))
## Skip the three header lines of the TCGA follow-up file
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## NOTE(review): hard-coded column indexes (0=patient barcode, 8=vital status,
## 9=days to last contact, 10=days to death) -- verify against the file header.
clinical=[['','','']]
for i in data:
    if clinical[-1][0]==i[0]:
        ## Repeat listing for the same patient: overwrite with the newer row
        if i[8]=='Alive':
            clinical[-1]=[i[0],int(i[9]),'Alive']
        elif i[8]=='Dead':
            clinical[-1]=[i[0],int(i[10]),'Dead']
        else:
            pass
    else:
        ## First listing for this patient: append a new row
        if i[8]=='Alive':
            clinical.append([i[0],int(i[9]),'Alive'])
        elif i[8]=='Dead':
            clinical.append([i[0],int(i[10]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','clinical','nationwidechildrens.org_clinical_patient_lgg.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## NOTE(review): hard-coded column indexes (4=grade, 10=sex, -12=age, 39=vital status,
    ## 40/41=follow up/death days) -- verify against the file header. Unlike the LAML
    ## script there is no try/except here, so a malformed row would raise.
    more_clinical[i[0]]=[grade_dict[i[4]],sex_dict[i[10]],int(i[-12])]
    if i[39]=='Alive':
        clinical4.append([i[0],int(i[40]),'Alive'])
    elif i[39]=='Dead':
        clinical4.append([i[0],int(i[41]),'Dead'])
    else:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        ## Keep whichever record has the longer follow-up time
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
final_clinical=[]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the patient barcode from the first three '-'-separated fields of the sample barcode
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            ## A patient can map to several mRNA files; accumulate them in a list
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    if TCGA_to_mrna.has_key(i[0]):  # NOTE(review): dict.has_key() is Python 2 only -- this script targets Python 2
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','LGG','mrna',j))
        f.readline()
        ## Each entry is [gene symbol (from the 'symbol|id' column), normalized RSEM value (last column)]
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## Builds final_genes: one list per patient (same order as clinical_and_files), each holding
## only the [gene, value] pairs for genes that passed the cutoff.
## The shared empty list from [[]]*len(genes) is safe here because elements are rebound
## (final_genes[index]=final_genes[index]+[kk]), never mutated in place.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## BUGFIX: use float division. Under Python 2, len(genes)/4 truncates, which made the
    ## zero-expression cutoff stricter than the stated "no more than a fourth of the patients".
    ## The author's later revision of this script family uses /4.0 as well.
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','LGG','final_genes.txt'),'w')
##for i in final_genes:
##    f.write(str(i))
##    f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
## Map vital status strings to the event indicator expected by Surv()/coxph (0=censored, 1=event)
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
## Run one Cox proportional hazards regression per gene, adjusting for grade, sex, and age
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        ## kaplan rows: [time, vital status, grade, sex, age, expression value for this gene]
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ##grade2
    ## Dummy-encode tumor grade (kaplan column 2): one indicator per grade level
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + grade2 + grade3 + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if no 'gene' row is found in the R output, coeff/pvalue silently keep
    ## the values from the previous iteration and get re-appended below -- confirm intended.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## Output format: one gene per line, "<gene>\t<coefficient>\t<pvalue>\n".
f=open(os.path.join(BASE_DIR,'cox_regression','LGG','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/LGG/cox_regression.py",
"copies": "1",
"size": "8870",
"license": "mit",
"hash": 4592270440754724000,
"line_mean": 32.5984848485,
"line_max": 142,
"alpha_frac": 0.6340473506,
"autogenerated": false,
"ratio": 3.0820013898540655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42160487404540653,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.