repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
eventifyio/eventify | eventify/drivers/crossbar.py | Component.onClose | python | def onClose(self, wasClean):
self.log.error('lost connection to crossbar on session %' + str(self.session_id))
for task in asyncio.Task.all_tasks():
task.cancel()
asyncio.get_event_loop().stop() | Disconnect when connection to message
broker is lost | train | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/drivers/crossbar.py#L63-L71 | null | class Component(BaseComponent, ApplicationSession):
"""
Handle subscribing to topics
"""
log = logging.getLogger("eventify.drivers.crossbar")
async def emit_event(self, event):
"""
Publish an event back to crossbar
:param event: Event object
"""
self.log.debug("publishing event on %s", self.publish_topic)
if self.config.extra['config']['pub_options']['retain']:
try:
await persist_event(
self.publish_topic,
event,
self.pool
)
except SystemError as error:
self.log.error(error)
return
try:
await self.publish(
self.publish_topic,
event.__dict__,
options=self.publish_options
)
except TransportLost as error:
for task in asyncio.Task.all_tasks():
task.cancel()
asyncio.get_event_loop().stop()
self.log.error(error)
def onDisconnect(self):
"""
Event fired when transport is lost
"""
self.log.error('onDisconnect event fired')
def onLeave(self, reason=None, message=None):
"""
:param reason:
:param message:
"""
self.log.info('Leaving realm; reason: %s', reason)
def onUserError(self, fail, message):
"""
Handle user errors
"""
self.log.error(fail)
self.log.error(message)
async def onJoin(self, details):
self.log.debug("joined websocket realm: %s", details)
# set session_id for reconnect
self.session_id = details.session
self.realm_id = details.realm
for handler in self.handlers:
# initialize handler
handler_instance = handler()
handler_instance.set_session(self)
if hasattr(handler_instance, 'init'):
await handler_instance.init()
if hasattr(handler_instance, 'on_event'):
self.log.debug("subscribing to topic %s", handler_instance.subscribe_topic)
# Used with base handler defined subscribe_topic
if handler_instance.subscribe_topic is not None:
await self.subscribe(
handler_instance.on_event,
handler_instance.subscribe_topic,
)
self.log.debug("subscribed to topic: %s", handler_instance.subscribe_topic)
else:
# Used with config.json defined topics
if self.subscribed_topics is not None:
for topic in self.subscribed_topics:
await self.subscribe(
handler_instance.on_event,
topic
)
self.log.debug("subscribed to topic: %s", topic)
if hasattr(handler_instance, 'worker'):
# or just await handler.worker()
while True:
try:
await handler_instance.worker()
except Exception as error:
self.log.error("Operation failed. %s", error)
traceback.print_exc(file=sys.stdout)
continue
async def show_sessions(self):
"""
Returns an object with a lists of the session IDs
for all sessions currently attached to the realm
http://crossbar.io/docs/Session-Metaevents-and-Procedures/
"""
res = await self.call("wamp.session.list")
for session_id in res:
session = await self.call("wamp.session.get", session_id)
self.log.info(session)
async def total_sessions(self):
"""
Returns the number of sessions currently attached to the realm.
http://crossbar.io/docs/Session-Metaevents-and-Procedures/
"""
res = await self.call("wamp.session.count")
self.log.info(res)
async def lookup_session(self, topic_name):
"""
Attempts to find the session id for a given topic
http://crossbar.io/docs/Subscription-Meta-Events-and-Procedures/
"""
res = await self.call("wamp.subscription.lookup", topic_name)
self.log.info(res)
|
eventifyio/eventify | eventify/drivers/crossbar.py | Component.onUserError | python | def onUserError(self, fail, message):
self.log.error(fail)
self.log.error(message) | Handle user errors | train | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/drivers/crossbar.py#L86-L91 | null | class Component(BaseComponent, ApplicationSession):
"""
Handle subscribing to topics
"""
log = logging.getLogger("eventify.drivers.crossbar")
async def emit_event(self, event):
"""
Publish an event back to crossbar
:param event: Event object
"""
self.log.debug("publishing event on %s", self.publish_topic)
if self.config.extra['config']['pub_options']['retain']:
try:
await persist_event(
self.publish_topic,
event,
self.pool
)
except SystemError as error:
self.log.error(error)
return
try:
await self.publish(
self.publish_topic,
event.__dict__,
options=self.publish_options
)
except TransportLost as error:
for task in asyncio.Task.all_tasks():
task.cancel()
asyncio.get_event_loop().stop()
self.log.error(error)
def onClose(self, wasClean):
"""
Disconnect when connection to message
broker is lost
"""
self.log.error('lost connection to crossbar on session %' + str(self.session_id))
for task in asyncio.Task.all_tasks():
task.cancel()
asyncio.get_event_loop().stop()
def onDisconnect(self):
"""
Event fired when transport is lost
"""
self.log.error('onDisconnect event fired')
def onLeave(self, reason=None, message=None):
"""
:param reason:
:param message:
"""
self.log.info('Leaving realm; reason: %s', reason)
async def onJoin(self, details):
self.log.debug("joined websocket realm: %s", details)
# set session_id for reconnect
self.session_id = details.session
self.realm_id = details.realm
for handler in self.handlers:
# initialize handler
handler_instance = handler()
handler_instance.set_session(self)
if hasattr(handler_instance, 'init'):
await handler_instance.init()
if hasattr(handler_instance, 'on_event'):
self.log.debug("subscribing to topic %s", handler_instance.subscribe_topic)
# Used with base handler defined subscribe_topic
if handler_instance.subscribe_topic is not None:
await self.subscribe(
handler_instance.on_event,
handler_instance.subscribe_topic,
)
self.log.debug("subscribed to topic: %s", handler_instance.subscribe_topic)
else:
# Used with config.json defined topics
if self.subscribed_topics is not None:
for topic in self.subscribed_topics:
await self.subscribe(
handler_instance.on_event,
topic
)
self.log.debug("subscribed to topic: %s", topic)
if hasattr(handler_instance, 'worker'):
# or just await handler.worker()
while True:
try:
await handler_instance.worker()
except Exception as error:
self.log.error("Operation failed. %s", error)
traceback.print_exc(file=sys.stdout)
continue
async def show_sessions(self):
"""
Returns an object with a lists of the session IDs
for all sessions currently attached to the realm
http://crossbar.io/docs/Session-Metaevents-and-Procedures/
"""
res = await self.call("wamp.session.list")
for session_id in res:
session = await self.call("wamp.session.get", session_id)
self.log.info(session)
async def total_sessions(self):
"""
Returns the number of sessions currently attached to the realm.
http://crossbar.io/docs/Session-Metaevents-and-Procedures/
"""
res = await self.call("wamp.session.count")
self.log.info(res)
async def lookup_session(self, topic_name):
"""
Attempts to find the session id for a given topic
http://crossbar.io/docs/Subscription-Meta-Events-and-Procedures/
"""
res = await self.call("wamp.subscription.lookup", topic_name)
self.log.info(res)
|
eventifyio/eventify | eventify/drivers/crossbar.py | Component.show_sessions | python | async def show_sessions(self):
res = await self.call("wamp.session.list")
for session_id in res:
session = await self.call("wamp.session.get", session_id)
self.log.info(session) | Returns an object with a lists of the session IDs
for all sessions currently attached to the realm
http://crossbar.io/docs/Session-Metaevents-and-Procedures/ | train | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/drivers/crossbar.py#L138-L148 | null | class Component(BaseComponent, ApplicationSession):
"""
Handle subscribing to topics
"""
log = logging.getLogger("eventify.drivers.crossbar")
async def emit_event(self, event):
"""
Publish an event back to crossbar
:param event: Event object
"""
self.log.debug("publishing event on %s", self.publish_topic)
if self.config.extra['config']['pub_options']['retain']:
try:
await persist_event(
self.publish_topic,
event,
self.pool
)
except SystemError as error:
self.log.error(error)
return
try:
await self.publish(
self.publish_topic,
event.__dict__,
options=self.publish_options
)
except TransportLost as error:
for task in asyncio.Task.all_tasks():
task.cancel()
asyncio.get_event_loop().stop()
self.log.error(error)
def onClose(self, wasClean):
"""
Disconnect when connection to message
broker is lost
"""
self.log.error('lost connection to crossbar on session %' + str(self.session_id))
for task in asyncio.Task.all_tasks():
task.cancel()
asyncio.get_event_loop().stop()
def onDisconnect(self):
"""
Event fired when transport is lost
"""
self.log.error('onDisconnect event fired')
def onLeave(self, reason=None, message=None):
"""
:param reason:
:param message:
"""
self.log.info('Leaving realm; reason: %s', reason)
def onUserError(self, fail, message):
"""
Handle user errors
"""
self.log.error(fail)
self.log.error(message)
async def onJoin(self, details):
self.log.debug("joined websocket realm: %s", details)
# set session_id for reconnect
self.session_id = details.session
self.realm_id = details.realm
for handler in self.handlers:
# initialize handler
handler_instance = handler()
handler_instance.set_session(self)
if hasattr(handler_instance, 'init'):
await handler_instance.init()
if hasattr(handler_instance, 'on_event'):
self.log.debug("subscribing to topic %s", handler_instance.subscribe_topic)
# Used with base handler defined subscribe_topic
if handler_instance.subscribe_topic is not None:
await self.subscribe(
handler_instance.on_event,
handler_instance.subscribe_topic,
)
self.log.debug("subscribed to topic: %s", handler_instance.subscribe_topic)
else:
# Used with config.json defined topics
if self.subscribed_topics is not None:
for topic in self.subscribed_topics:
await self.subscribe(
handler_instance.on_event,
topic
)
self.log.debug("subscribed to topic: %s", topic)
if hasattr(handler_instance, 'worker'):
# or just await handler.worker()
while True:
try:
await handler_instance.worker()
except Exception as error:
self.log.error("Operation failed. %s", error)
traceback.print_exc(file=sys.stdout)
continue
async def total_sessions(self):
"""
Returns the number of sessions currently attached to the realm.
http://crossbar.io/docs/Session-Metaevents-and-Procedures/
"""
res = await self.call("wamp.session.count")
self.log.info(res)
async def lookup_session(self, topic_name):
"""
Attempts to find the session id for a given topic
http://crossbar.io/docs/Subscription-Meta-Events-and-Procedures/
"""
res = await self.call("wamp.subscription.lookup", topic_name)
self.log.info(res)
|
eventifyio/eventify | eventify/drivers/crossbar.py | Component.lookup_session | python | async def lookup_session(self, topic_name):
res = await self.call("wamp.subscription.lookup", topic_name)
self.log.info(res) | Attempts to find the session id for a given topic
http://crossbar.io/docs/Subscription-Meta-Events-and-Procedures/ | train | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/drivers/crossbar.py#L159-L166 | null | class Component(BaseComponent, ApplicationSession):
"""
Handle subscribing to topics
"""
log = logging.getLogger("eventify.drivers.crossbar")
async def emit_event(self, event):
"""
Publish an event back to crossbar
:param event: Event object
"""
self.log.debug("publishing event on %s", self.publish_topic)
if self.config.extra['config']['pub_options']['retain']:
try:
await persist_event(
self.publish_topic,
event,
self.pool
)
except SystemError as error:
self.log.error(error)
return
try:
await self.publish(
self.publish_topic,
event.__dict__,
options=self.publish_options
)
except TransportLost as error:
for task in asyncio.Task.all_tasks():
task.cancel()
asyncio.get_event_loop().stop()
self.log.error(error)
def onClose(self, wasClean):
"""
Disconnect when connection to message
broker is lost
"""
self.log.error('lost connection to crossbar on session %' + str(self.session_id))
for task in asyncio.Task.all_tasks():
task.cancel()
asyncio.get_event_loop().stop()
def onDisconnect(self):
"""
Event fired when transport is lost
"""
self.log.error('onDisconnect event fired')
def onLeave(self, reason=None, message=None):
"""
:param reason:
:param message:
"""
self.log.info('Leaving realm; reason: %s', reason)
def onUserError(self, fail, message):
"""
Handle user errors
"""
self.log.error(fail)
self.log.error(message)
async def onJoin(self, details):
self.log.debug("joined websocket realm: %s", details)
# set session_id for reconnect
self.session_id = details.session
self.realm_id = details.realm
for handler in self.handlers:
# initialize handler
handler_instance = handler()
handler_instance.set_session(self)
if hasattr(handler_instance, 'init'):
await handler_instance.init()
if hasattr(handler_instance, 'on_event'):
self.log.debug("subscribing to topic %s", handler_instance.subscribe_topic)
# Used with base handler defined subscribe_topic
if handler_instance.subscribe_topic is not None:
await self.subscribe(
handler_instance.on_event,
handler_instance.subscribe_topic,
)
self.log.debug("subscribed to topic: %s", handler_instance.subscribe_topic)
else:
# Used with config.json defined topics
if self.subscribed_topics is not None:
for topic in self.subscribed_topics:
await self.subscribe(
handler_instance.on_event,
topic
)
self.log.debug("subscribed to topic: %s", topic)
if hasattr(handler_instance, 'worker'):
# or just await handler.worker()
while True:
try:
await handler_instance.worker()
except Exception as error:
self.log.error("Operation failed. %s", error)
traceback.print_exc(file=sys.stdout)
continue
async def show_sessions(self):
"""
Returns an object with a lists of the session IDs
for all sessions currently attached to the realm
http://crossbar.io/docs/Session-Metaevents-and-Procedures/
"""
res = await self.call("wamp.session.list")
for session_id in res:
session = await self.call("wamp.session.get", session_id)
self.log.info(session)
async def total_sessions(self):
"""
Returns the number of sessions currently attached to the realm.
http://crossbar.io/docs/Session-Metaevents-and-Procedures/
"""
res = await self.call("wamp.session.count")
self.log.info(res)
|
eventifyio/eventify | eventify/drivers/crossbar.py | Service.setup_runner | python | def setup_runner(self):
runner = ApplicationRunner(
url=self.config['transport_host'],
realm=u'realm1',
extra={
'config': self.config,
'handlers': self.handlers,
}
)
return runner | Setup instance of runner var | train | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/drivers/crossbar.py#L173-L185 | null | class Service(Eventify):
"""
Create crossbar service
"""
def check_transport_host(self):
"""
Check if crossbar port is open
on transport host
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('events-server', 8080)) # TODO: Read from config vs using hard coded hostname
if result == 0:
logging.info('port 8080 on crossbar is open!')
return True
return False
def reconnect(self):
"""
Handle reconnect logic if connection
to crossbar is lost
"""
connect_attempt = 0
max_retries = self.config['max_reconnect_retries']
logging.info('attempting to reconnect to crossbar')
runner = self.setup_runner()
while True:
if connect_attempt == max_retries:
logging.info('max retries reached; stopping service')
sys.exit(1)
self.check_event_loop()
try:
logging.info('waiting 5 seconds')
time.sleep(5)
if self.check_transport_host():
logging.info('waiting 10 seconds to ensure that crossbar has initialized before reconnecting')
time.sleep(10)
runner.run(Component)
else:
logging.error('crossbar host port 8080 not available...')
except RuntimeError as error:
logging.error(error)
except ConnectionRefusedError as error:
logging.error(error)
except ConnectionError as error:
logging.error(error)
except KeyboardInterrupt:
logging.info('User initiated shutdown')
loop = asyncio.get_event_loop()
loop.stop()
sys.exit(1)
connect_attempt += 1
def start(self, start_loop=True):
"""
Start a producer/consumer service
"""
txaio.start_logging()
runner = self.setup_runner()
if start_loop:
try:
runner.run(Component)
except EventifyHandlerInitializationFailed as initError:
logging.error('Unable to initialize handler: %s.' % initError.message)
sys.exit(1)
except ConnectionRefusedError:
logging.error('Unable to connect to crossbar instance. Is it running?')
sys.exit(1)
except KeyboardInterrupt:
logging.info('User initiated shutdown')
loop = asyncio.get_event_loop()
loop.stop()
sys.exit(1)
self.check_event_loop()
self.reconnect()
else:
return runner.run(
Component,
start_loop=start_loop
)
|
eventifyio/eventify | eventify/drivers/crossbar.py | Service.reconnect | python | def reconnect(self):
connect_attempt = 0
max_retries = self.config['max_reconnect_retries']
logging.info('attempting to reconnect to crossbar')
runner = self.setup_runner()
while True:
if connect_attempt == max_retries:
logging.info('max retries reached; stopping service')
sys.exit(1)
self.check_event_loop()
try:
logging.info('waiting 5 seconds')
time.sleep(5)
if self.check_transport_host():
logging.info('waiting 10 seconds to ensure that crossbar has initialized before reconnecting')
time.sleep(10)
runner.run(Component)
else:
logging.error('crossbar host port 8080 not available...')
except RuntimeError as error:
logging.error(error)
except ConnectionRefusedError as error:
logging.error(error)
except ConnectionError as error:
logging.error(error)
except KeyboardInterrupt:
logging.info('User initiated shutdown')
loop = asyncio.get_event_loop()
loop.stop()
sys.exit(1)
connect_attempt += 1 | Handle reconnect logic if connection
to crossbar is lost | train | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/drivers/crossbar.py#L199-L235 | [
"def check_event_loop():\n \"\"\"\n Check if event loop is closed and\n create a new event loop\n \"\"\"\n loop = asyncio.get_event_loop()\n if loop.is_closed():\n asyncio.set_event_loop(asyncio.new_event_loop())\n",
"def setup_runner(self):\n \"\"\"\n Setup instance of runner var\n \"\"\"\n runner = ApplicationRunner(\n url=self.config['transport_host'],\n realm=u'realm1',\n extra={\n 'config': self.config,\n 'handlers': self.handlers,\n }\n )\n return runner\n",
"def check_transport_host(self):\n \"\"\"\n Check if crossbar port is open\n on transport host\n \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('events-server', 8080)) # TODO: Read from config vs using hard coded hostname\n if result == 0:\n logging.info('port 8080 on crossbar is open!')\n return True\n return False\n"
] | class Service(Eventify):
"""
Create crossbar service
"""
def setup_runner(self):
"""
Setup instance of runner var
"""
runner = ApplicationRunner(
url=self.config['transport_host'],
realm=u'realm1',
extra={
'config': self.config,
'handlers': self.handlers,
}
)
return runner
def check_transport_host(self):
"""
Check if crossbar port is open
on transport host
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('events-server', 8080)) # TODO: Read from config vs using hard coded hostname
if result == 0:
logging.info('port 8080 on crossbar is open!')
return True
return False
def start(self, start_loop=True):
"""
Start a producer/consumer service
"""
txaio.start_logging()
runner = self.setup_runner()
if start_loop:
try:
runner.run(Component)
except EventifyHandlerInitializationFailed as initError:
logging.error('Unable to initialize handler: %s.' % initError.message)
sys.exit(1)
except ConnectionRefusedError:
logging.error('Unable to connect to crossbar instance. Is it running?')
sys.exit(1)
except KeyboardInterrupt:
logging.info('User initiated shutdown')
loop = asyncio.get_event_loop()
loop.stop()
sys.exit(1)
self.check_event_loop()
self.reconnect()
else:
return runner.run(
Component,
start_loop=start_loop
)
|
eventifyio/eventify | eventify/drivers/crossbar.py | Service.start | python | def start(self, start_loop=True):
txaio.start_logging()
runner = self.setup_runner()
if start_loop:
try:
runner.run(Component)
except EventifyHandlerInitializationFailed as initError:
logging.error('Unable to initialize handler: %s.' % initError.message)
sys.exit(1)
except ConnectionRefusedError:
logging.error('Unable to connect to crossbar instance. Is it running?')
sys.exit(1)
except KeyboardInterrupt:
logging.info('User initiated shutdown')
loop = asyncio.get_event_loop()
loop.stop()
sys.exit(1)
self.check_event_loop()
self.reconnect()
else:
return runner.run(
Component,
start_loop=start_loop
) | Start a producer/consumer service | train | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/drivers/crossbar.py#L237-L264 | [
"def check_event_loop():\n \"\"\"\n Check if event loop is closed and\n create a new event loop\n \"\"\"\n loop = asyncio.get_event_loop()\n if loop.is_closed():\n asyncio.set_event_loop(asyncio.new_event_loop())\n",
"def setup_runner(self):\n \"\"\"\n Setup instance of runner var\n \"\"\"\n runner = ApplicationRunner(\n url=self.config['transport_host'],\n realm=u'realm1',\n extra={\n 'config': self.config,\n 'handlers': self.handlers,\n }\n )\n return runner\n",
"def reconnect(self):\n \"\"\"\n Handle reconnect logic if connection\n to crossbar is lost\n \"\"\"\n connect_attempt = 0\n max_retries = self.config['max_reconnect_retries']\n logging.info('attempting to reconnect to crossbar')\n runner = self.setup_runner()\n while True:\n\n if connect_attempt == max_retries:\n logging.info('max retries reached; stopping service')\n sys.exit(1)\n self.check_event_loop()\n\n try:\n logging.info('waiting 5 seconds')\n time.sleep(5)\n if self.check_transport_host():\n logging.info('waiting 10 seconds to ensure that crossbar has initialized before reconnecting')\n time.sleep(10)\n runner.run(Component)\n else:\n logging.error('crossbar host port 8080 not available...')\n except RuntimeError as error:\n logging.error(error)\n except ConnectionRefusedError as error:\n logging.error(error)\n except ConnectionError as error:\n logging.error(error)\n except KeyboardInterrupt:\n logging.info('User initiated shutdown')\n loop = asyncio.get_event_loop()\n loop.stop()\n sys.exit(1)\n connect_attempt += 1\n"
] | class Service(Eventify):
"""
Create crossbar service
"""
def setup_runner(self):
"""
Setup instance of runner var
"""
runner = ApplicationRunner(
url=self.config['transport_host'],
realm=u'realm1',
extra={
'config': self.config,
'handlers': self.handlers,
}
)
return runner
def check_transport_host(self):
"""
Check if crossbar port is open
on transport host
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('events-server', 8080)) # TODO: Read from config vs using hard coded hostname
if result == 0:
logging.info('port 8080 on crossbar is open!')
return True
return False
def reconnect(self):
"""
Handle reconnect logic if connection
to crossbar is lost
"""
connect_attempt = 0
max_retries = self.config['max_reconnect_retries']
logging.info('attempting to reconnect to crossbar')
runner = self.setup_runner()
while True:
if connect_attempt == max_retries:
logging.info('max retries reached; stopping service')
sys.exit(1)
self.check_event_loop()
try:
logging.info('waiting 5 seconds')
time.sleep(5)
if self.check_transport_host():
logging.info('waiting 10 seconds to ensure that crossbar has initialized before reconnecting')
time.sleep(10)
runner.run(Component)
else:
logging.error('crossbar host port 8080 not available...')
except RuntimeError as error:
logging.error(error)
except ConnectionRefusedError as error:
logging.error(error)
except ConnectionError as error:
logging.error(error)
except KeyboardInterrupt:
logging.info('User initiated shutdown')
loop = asyncio.get_event_loop()
loop.stop()
sys.exit(1)
connect_attempt += 1
|
eventifyio/eventify | eventify/__init__.py | Eventify.set_missing_defaults | python | def set_missing_defaults(self):
if 'pub_options' not in self.config:
self.config['pub_options'] = {
'acknowledge': True,
'retain': True
}
if 'sub_options' not in self.config:
self.config['sub_options'] = {
'get_retained': False
}
if 'subscribed_topics' not in self.config:
self.config['subscribed_topics'] = None
if 'replay_events' not in self.config:
self.config['replay_events'] = False
if 'max_reconnect_retries' not in self.config:
self.config['max_reconnect_retries'] = 10 | Ensure that minimal configuration is
setup and set defaults for missing values | train | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/__init__.py#L48-L71 | null | class Eventify(object):
"""
Base Class for eventify
"""
def __init__(self, driver='crossbar',
config_file='config.json',
handlers=None):
"""
Args:
Driver
"""
handlers = handlers or []
logger.debug('initializing eventify project on driver: %s', driver)
if not handlers:
raise EventifyInitError("callback parameter is required")
self.driver = driver
self.config_file = config_file
self.config = self.load_config
self.handlers = handlers
self.config_sanity_check()
self.set_missing_defaults()
logger.debug('configuration loaded: %s', self.config)
def config_sanity_check(self):
"""
Base configuration sanity checks
"""
if 'name' not in self.config:
raise EventifyConfigError(
"""Required configuration parameter missing!
Please configure "name" as a string in your
configuration.""")
if 'publish_topic' not in self.config:
raise EventifyConfigError(
"""Required configuration parameter missing!
Please configure "public_topic" as an object
in your configuration.""")
if 'topic' not in self.config['publish_topic']:
raise EventifyConfigError(
"""Required configuration parameter missing!
Please configure "topic" as a key in your
"public_topic object.""")
@property
def load_config(self):
"""
Load configuration for the service
Args:
config_file: Configuration file path
"""
logger.debug('loading config file: %s', self.config_file)
if os.path.exists(self.config_file):
with open(self.config_file) as file_handle:
return json.load(file_handle)
else:
logger.error('configuration file is required for eventify')
logger.error('unable to load configuration for service')
raise EventifyConfigError(
'Configuration is required! Missing: %s' % self.config_file
)
@staticmethod
def check_event_loop():
"""
Check if event loop is closed and
create a new event loop
"""
loop = asyncio.get_event_loop()
if loop.is_closed():
asyncio.set_event_loop(asyncio.new_event_loop())
|
eventifyio/eventify | eventify/__init__.py | Eventify.config_sanity_check | python | def config_sanity_check(self):
if 'name' not in self.config:
raise EventifyConfigError(
"""Required configuration parameter missing!
Please configure "name" as a string in your
configuration.""")
if 'publish_topic' not in self.config:
raise EventifyConfigError(
"""Required configuration parameter missing!
Please configure "public_topic" as an object
in your configuration.""")
if 'topic' not in self.config['publish_topic']:
raise EventifyConfigError(
"""Required configuration parameter missing!
Please configure "topic" as a key in your
"public_topic object.""") | Base configuration sanity checks | train | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/__init__.py#L73-L93 | null | class Eventify(object):
"""
Base Class for eventify
"""
def __init__(self, driver='crossbar',
config_file='config.json',
handlers=None):
"""
Args:
Driver
"""
handlers = handlers or []
logger.debug('initializing eventify project on driver: %s', driver)
if not handlers:
raise EventifyInitError("callback parameter is required")
self.driver = driver
self.config_file = config_file
self.config = self.load_config
self.handlers = handlers
self.config_sanity_check()
self.set_missing_defaults()
logger.debug('configuration loaded: %s', self.config)
def set_missing_defaults(self):
"""
Ensure that minimal configuration is
setup and set defaults for missing values
"""
if 'pub_options' not in self.config:
self.config['pub_options'] = {
'acknowledge': True,
'retain': True
}
if 'sub_options' not in self.config:
self.config['sub_options'] = {
'get_retained': False
}
if 'subscribed_topics' not in self.config:
self.config['subscribed_topics'] = None
if 'replay_events' not in self.config:
self.config['replay_events'] = False
if 'max_reconnect_retries' not in self.config:
self.config['max_reconnect_retries'] = 10
@property
def load_config(self):
"""
Load configuration for the service
Args:
config_file: Configuration file path
"""
logger.debug('loading config file: %s', self.config_file)
if os.path.exists(self.config_file):
with open(self.config_file) as file_handle:
return json.load(file_handle)
else:
logger.error('configuration file is required for eventify')
logger.error('unable to load configuration for service')
raise EventifyConfigError(
'Configuration is required! Missing: %s' % self.config_file
)
@staticmethod
def check_event_loop():
"""
Check if event loop is closed and
create a new event loop
"""
loop = asyncio.get_event_loop()
if loop.is_closed():
asyncio.set_event_loop(asyncio.new_event_loop())
|
eventifyio/eventify | eventify/__init__.py | Eventify.load_config | python | def load_config(self):
logger.debug('loading config file: %s', self.config_file)
if os.path.exists(self.config_file):
with open(self.config_file) as file_handle:
return json.load(file_handle)
else:
logger.error('configuration file is required for eventify')
logger.error('unable to load configuration for service')
raise EventifyConfigError(
'Configuration is required! Missing: %s' % self.config_file
) | Load configuration for the service
Args:
config_file: Configuration file path | train | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/__init__.py#L96-L112 | null | class Eventify(object):
"""
Base Class for eventify
"""
def __init__(self, driver='crossbar',
config_file='config.json',
handlers=None):
"""
Args:
Driver
"""
handlers = handlers or []
logger.debug('initializing eventify project on driver: %s', driver)
if not handlers:
raise EventifyInitError("callback parameter is required")
self.driver = driver
self.config_file = config_file
self.config = self.load_config
self.handlers = handlers
self.config_sanity_check()
self.set_missing_defaults()
logger.debug('configuration loaded: %s', self.config)
def set_missing_defaults(self):
"""
Ensure that minimal configuration is
setup and set defaults for missing values
"""
if 'pub_options' not in self.config:
self.config['pub_options'] = {
'acknowledge': True,
'retain': True
}
if 'sub_options' not in self.config:
self.config['sub_options'] = {
'get_retained': False
}
if 'subscribed_topics' not in self.config:
self.config['subscribed_topics'] = None
if 'replay_events' not in self.config:
self.config['replay_events'] = False
if 'max_reconnect_retries' not in self.config:
self.config['max_reconnect_retries'] = 10
def config_sanity_check(self):
"""
Base configuration sanity checks
"""
if 'name' not in self.config:
raise EventifyConfigError(
"""Required configuration parameter missing!
Please configure "name" as a string in your
configuration.""")
if 'publish_topic' not in self.config:
raise EventifyConfigError(
"""Required configuration parameter missing!
Please configure "public_topic" as an object
in your configuration.""")
if 'topic' not in self.config['publish_topic']:
raise EventifyConfigError(
"""Required configuration parameter missing!
Please configure "topic" as a key in your
"public_topic object.""")
@property
@staticmethod
def check_event_loop():
"""
Check if event loop is closed and
create a new event loop
"""
loop = asyncio.get_event_loop()
if loop.is_closed():
asyncio.set_event_loop(asyncio.new_event_loop())
|
eventifyio/eventify | eventify/__init__.py | Eventify.check_event_loop | python | def check_event_loop():
loop = asyncio.get_event_loop()
if loop.is_closed():
asyncio.set_event_loop(asyncio.new_event_loop()) | Check if event loop is closed and
create a new event loop | train | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/__init__.py#L115-L122 | null | class Eventify(object):
"""
Base Class for eventify
"""
def __init__(self, driver='crossbar',
config_file='config.json',
handlers=None):
"""
Args:
Driver
"""
handlers = handlers or []
logger.debug('initializing eventify project on driver: %s', driver)
if not handlers:
raise EventifyInitError("callback parameter is required")
self.driver = driver
self.config_file = config_file
self.config = self.load_config
self.handlers = handlers
self.config_sanity_check()
self.set_missing_defaults()
logger.debug('configuration loaded: %s', self.config)
def set_missing_defaults(self):
"""
Ensure that minimal configuration is
setup and set defaults for missing values
"""
if 'pub_options' not in self.config:
self.config['pub_options'] = {
'acknowledge': True,
'retain': True
}
if 'sub_options' not in self.config:
self.config['sub_options'] = {
'get_retained': False
}
if 'subscribed_topics' not in self.config:
self.config['subscribed_topics'] = None
if 'replay_events' not in self.config:
self.config['replay_events'] = False
if 'max_reconnect_retries' not in self.config:
self.config['max_reconnect_retries'] = 10
def config_sanity_check(self):
"""
Base configuration sanity checks
"""
if 'name' not in self.config:
raise EventifyConfigError(
"""Required configuration parameter missing!
Please configure "name" as a string in your
configuration.""")
if 'publish_topic' not in self.config:
raise EventifyConfigError(
"""Required configuration parameter missing!
Please configure "public_topic" as an object
in your configuration.""")
if 'topic' not in self.config['publish_topic']:
raise EventifyConfigError(
"""Required configuration parameter missing!
Please configure "topic" as a key in your
"public_topic object.""")
@property
def load_config(self):
"""
Load configuration for the service
Args:
config_file: Configuration file path
"""
logger.debug('loading config file: %s', self.config_file)
if os.path.exists(self.config_file):
with open(self.config_file) as file_handle:
return json.load(file_handle)
else:
logger.error('configuration file is required for eventify')
logger.error('unable to load configuration for service')
raise EventifyConfigError(
'Configuration is required! Missing: %s' % self.config_file
)
@staticmethod
|
etal/biocma | biocma/sugar.py | maybe_open | python | def maybe_open(infile, mode='r'):
# ENH: Exception safety?
if isinstance(infile, basestring):
handle = open(infile, mode)
do_close = True
else:
handle = infile
do_close = False
yield handle
if do_close:
handle.close() | Take a file name or a handle, and return a handle.
Simplifies creating functions that automagically accept either a file name
or an already opened file handle. | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/sugar.py#L26-L41 | null | """Helpers."""
import contextlib
import itertools
import logging
def make_reader(parser):
def read(infile, *args, **kwargs):
gen = parser(infile, *args, **kwargs)
try:
first = gen.next()
except StopIteration:
raise ValueError("Input file is empty")
try:
gen.next()
except StopIteration:
return first
else:
raise ValueError("Input file contains multiple elements;"
"use parse() instead.")
return read
@contextlib.contextmanager
def unblank(stream):
"""Remove blank lines from a file being read iteratively."""
return itertools.ifilter(None, (line.strip() for line in stream))
|
etal/biocma | biocma/biocma.py | ChainMultiAlignment.from_block | python | def from_block(cls, block):
rseqs = cma.realign_seqs(block)
records = (SeqRecord(Seq(rseq, extended_protein),
id=bseq['id'],
description=bseq['description'],
dbxrefs=bseq['dbxrefs'].values(), # list of strings
annotations=dict(
index=bseq['index'],
length=bseq['length'],
dbxrefs=bseq['dbxrefs'],
phylum=bseq['phylum'],
taxchar=bseq['taxchar'],
head_seq=bseq['head_seq'],
tail_seq=bseq['tail_seq'],
head_len=bseq['head_len'],
tail_len=bseq['tail_len'], # dict
),
# ENH: annotate with conservation levels
# letter_annotations=bseq['x'],
)
for bseq, rseq in zip(block['sequences'], rseqs))
return cls(records,
# CMA attributes
# block['one'],
block['level'],
block['name'],
block['params'],
block['query_length'],
block['query_chars'],
) | Instantiate this class given a raw block (see parse_raw). | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/biocma.py#L67-L97 | [
"def realign_seqs(block, gap_char='.', align_indels=False):\n \"\"\"Add gaps to a block so all residues in a column are equivalent.\n\n Given a block, containing a list of \"sequences\" (dicts) each containing a\n \"seq\" (actual string sequence, where upper=match, lower=insert, dash=gap),\n insert gaps (- or .) into the sequences s.t.\n\n 1. columns line up properly, and\n 2. all resulting sequences have the same length\n\n\n The reason this needs to be done is that the query/consensus sequence is not\n assigned gaps to account for inserts in the other sequences. We need to add\n the gaps back to obtain a normal alignment.\n\n `return`: a list of realigned sequence strings.\n \"\"\"\n # ENH: align inserts using an external tool (if align_indels)\n all_chars = [list(sq['seq']) for sq in block['sequences']]\n # NB: If speed is an issue here, consider Numpy or Cython\n # main problem: list.insert is O(n) -- would OrderedDict help?\n nrows = len(all_chars)\n i = 0\n while i < len(all_chars[0]):\n rows_need_gaps = [r for r in all_chars if not r[i].islower()]\n if len(rows_need_gaps) != nrows:\n for row in rows_need_gaps:\n row.insert(i, gap_char)\n i += 1\n return [''.join(row) for row in all_chars]\n"
] | class ChainMultiAlignment(MultipleSeqAlignment):
"""Based on Biopython's class for a multiple sequence alignment."""
def __init__(self, records,
# CMA-specific attributes here
# one=1,
level=0,
name=None,
params=None,
query_length=None,
query_chars=None,
):
# NB: alphabet is always protein; X is OK
MultipleSeqAlignment.__init__(self, records, extended_protein)
self.level = level
self.name = name
self.params = params
self.query_length = query_length
self.query_chars = query_chars
@classmethod
|
etal/biocma | biocma/cma.py | _parse_blocks | python | def _parse_blocks(instream):
ilines = sugar.unblank(instream)
for line in ilines:
if line.startswith('['):
# Start of block
level, one, name, seqcount, params = _parse_block_header(line)
qlen, qchars = _parse_block_postheader(next(ilines))
# Pass control to the sequence parser
sequences = list(_parse_sequences(ilines, qlen))
# Validation
if not len(sequences) == seqcount:
logging.warn("Expected %d sequences in block %s, found %d",
seqcount, name, len(sequences))
yield {'level': level,
'one': one,
'name': name,
# 'seqcount': seqcount,
'params': params,
'query_length': qlen,
'query_chars': qchars,
'sequences': sequences,
} | Parse an alignment block from the given file handle.
Block looks like:
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
(209)***********************************************...
... sequences, numbered 1-8 ...
_0]. | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L43-L75 | [
"def unblank(stream):\n \"\"\"Remove blank lines from a file being read iteratively.\"\"\"\n return itertools.ifilter(None, (line.strip() for line in stream))\n",
"def _parse_block_header(line):\n \"\"\"\n [0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:\n \"\"\"\n level = line[1]\n one, _rest = line[4:].split(')=', 1)\n name, _rest = _rest.split('(', 1)\n seqcount, _rest = _rest.split(')', 1)\n params = _rest.strip('{}:')\n # try:\n # params = dict((key, float(val))\n # for key, val in (pair.split('=')\n # for pair in _rest[1:-2].split(',')))\n # except ValueError:\n # # Couldn't convert params to key-val pairs, for whatever reason\n # logging.warn(\"Failed to parse CMA params: %s\", _rest[1:-2])\n # params = {}\n return int(level), int(one), name, int(seqcount), params\n",
"def _parse_block_postheader(line):\n \"\"\"\n (209)**************!*****************!!*************...\n \"\"\"\n parts = line[1:].split(')', 1)\n qlen = int(parts[0])\n if not len(parts[1]) == qlen:\n logging.warn(\"postheader expected %d-long query, found %d\",\n qlen, len(parts[1]))\n return qlen, parts[1]\n",
"def _parse_sequences(ilines, expect_qlen):\n \"\"\"Parse the sequences in the current block.\n\n Sequence looks like:\n\n $3=227(209):\n >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75\n {()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*\n\n \"\"\"\n while True:\n first = next(ilines)\n if first.startswith('_') and first.endswith('].'):\n # End of sequences & end of block\n break\n\n # ENH: handle wrapped lines?\n try:\n index, this_len, query_len = _parse_seq_preheader(first)\n except ValueError:\n logging.warn('Unparseable line (SKIPPING):\\n%s', first)\n continue\n (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description\n ) = _parse_seq_header(next(ilines))\n try:\n headseq, molseq, tailseq = _parse_seq_body(next(ilines))\n except ValueError:\n logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)\n continue\n\n # Validation\n if expect_qlen != query_len:\n logging.warn(\"Query length in %s given as %d; expected %d\",\n rec_id, query_len, expect_qlen)\n if not headseq and not headlen:\n headlen = 0\n if not tailseq and not taillen:\n taillen = 0\n if headseq:\n if headlen is None:\n headlen = len(headseq)\n elif headlen != len(headseq):\n logging.warn(\"Conflicting head flank lengths in %s: %d, %d\",\n rec_id, headlen, len(headseq))\n if tailseq:\n if taillen is None:\n taillen = len(tailseq)\n elif taillen != len(tailseq):\n logging.warn(\"Conflicting tail flank lengths in %s: %d, %d\",\n rec_id, taillen, len(tailseq))\n\n yield {'index': index,\n 'id': rec_id,\n 'description': description,\n 'dbxrefs': dbxrefs,\n 'phylum': phylum,\n 'taxchar': taxchar,\n 'head_len': headlen,\n 'tail_len': taillen,\n 'head_seq': 
headseq,\n 'tail_seq': tailseq,\n 'length': this_len,\n 'seq': molseq,\n }\n"
] | #!/usr/bin/env python
"""Lower-level functionality for parsing CMA (.cma) files.
This handles .chn (CHAIN) files as a collection of CMA blocks, i.e. alignments.
"""
import itertools
import logging
import re
from . import sugar
def parse(infile):
with sugar.maybe_open(infile) as instream:
for block in _parse_blocks(instream):
yield block
read = sugar.make_reader(parse)
# Removed
# parse_raw = parse
# --------------------------------------------------------------------
# Parse the cma into raw chunks
# Flow:
# if START:
# parse line 1
# parse line 2
# pass control to _parse_sequences
#
# _parse_sequences:
# read a non-blank line;
# if END: return (w/ all sequences accumulated)
# else:
# read 2 more non-blank lines
# parse the 3 read lines & bundle up data
# yield a seq
#
def _parse_sequences(ilines, expect_qlen):
"""Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
"""
while True:
first = next(ilines)
if first.startswith('_') and first.endswith('].'):
# End of sequences & end of block
break
# ENH: handle wrapped lines?
try:
index, this_len, query_len = _parse_seq_preheader(first)
except ValueError:
logging.warn('Unparseable line (SKIPPING):\n%s', first)
continue
(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
) = _parse_seq_header(next(ilines))
try:
headseq, molseq, tailseq = _parse_seq_body(next(ilines))
except ValueError:
logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
continue
# Validation
if expect_qlen != query_len:
logging.warn("Query length in %s given as %d; expected %d",
rec_id, query_len, expect_qlen)
if not headseq and not headlen:
headlen = 0
if not tailseq and not taillen:
taillen = 0
if headseq:
if headlen is None:
headlen = len(headseq)
elif headlen != len(headseq):
logging.warn("Conflicting head flank lengths in %s: %d, %d",
rec_id, headlen, len(headseq))
if tailseq:
if taillen is None:
taillen = len(tailseq)
elif taillen != len(tailseq):
logging.warn("Conflicting tail flank lengths in %s: %d, %d",
rec_id, taillen, len(tailseq))
yield {'index': index,
'id': rec_id,
'description': description,
'dbxrefs': dbxrefs,
'phylum': phylum,
'taxchar': taxchar,
'head_len': headlen,
'tail_len': taillen,
'head_seq': headseq,
'tail_seq': tailseq,
'length': this_len,
'seq': molseq,
}
# Microparsing
def _parse_block_header(line):
"""
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
"""
level = line[1]
one, _rest = line[4:].split(')=', 1)
name, _rest = _rest.split('(', 1)
seqcount, _rest = _rest.split(')', 1)
params = _rest.strip('{}:')
# try:
# params = dict((key, float(val))
# for key, val in (pair.split('=')
# for pair in _rest[1:-2].split(',')))
# except ValueError:
# # Couldn't convert params to key-val pairs, for whatever reason
# logging.warn("Failed to parse CMA params: %s", _rest[1:-2])
# params = {}
return int(level), int(one), name, int(seqcount), params
def _parse_block_postheader(line):
"""
(209)**************!*****************!!*************...
"""
parts = line[1:].split(')', 1)
qlen = int(parts[0])
if not len(parts[1]) == qlen:
logging.warn("postheader expected %d-long query, found %d",
qlen, len(parts[1]))
return qlen, parts[1]
def _parse_seq_preheader(line):
"""
$3=227(209):
"""
match = re.match(r"\$ (\d+) = (\d+) \( (\d+) \):", line, re.VERBOSE)
if not match:
raise ValueError("Unparseable header: " + line)
index, this_len, query_len = match.groups()
return map(int, (index, this_len, query_len))
def _parse_seq_header(line):
"""Unique ID, head/tail lengths and taxonomy info from a sequence header.
The description is the part of the FASTA/CMA sequence header starting after
the first space (i.e. excluding ID), to the end of the line.
This function looks inside the first '{...}' pair to extract info.
Ex:
>consensus seq
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
>gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C
"""
# ENH: use the two functions in esbglib.parseutils
# or, move one or both of those functions into here
_parts = line[1:].split(None, 1)
rec_id = _parts[0]
descr = _parts[1] if _parts[1:] else ''
# Database cross references
dbxrefs = {}
if '|' in rec_id:
id_gen = iter(rec_id.rstrip('|').split('|'))
for key in id_gen:
try:
dbxrefs[key] = next(id_gen)
except StopIteration:
break
# Head/tail lengths and taxonomy codes
headlen = taillen = None
phylum = taxchar = ''
if descr.startswith('{'):
_deets, description = descr[1:].split('}', 1)
match = re.search(r"""
(?:
\| (?P<headlen> \d+)
\( (?P<taillen> \d+)
\)
\|
)?
(?:
< (?P<phylum> .+?)
\( (?P<taxchar> \w)
\)
>
)?
""", _deets, re.VERBOSE)
if match:
headlen, taillen, phylum, taxchar = match.groups()
if headlen is not None:
headlen = int(headlen)
if taillen is not None:
taillen = int(taillen)
if phylum is None:
phylum = ''
if taxchar is None:
taxchar = ''
else:
logging.warn("Couldn't match head/tail: %s", _deets)
else:
description = descr
# TODO - return a dictionary here, update it in _parse_sequences
return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
def _parse_seq_body(line):
"""
Ex:
{()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
Or:
{(HY)ELPWVEKYR...
The sequence fragments in parentheses represent N- or C-terminal flanking
regions that are not part of the alignment block (I think). Most tools don't
include them, but some do, apparently.
"""
line = line.rstrip('*')
if '{()' in line:
head, _rest = line.split('{()', 1)
else:
# Match parens
_rest = line.split('{(', 1)[1]
head, _rest = _rest.split(')', 1)
if '()}' in _rest:
molseq, tail = _rest.split('()}', 1)
else:
# Match parens
molseq, _rest = _rest.split('(', 1)
tail = _rest.split(')}', 1)[0]
return (head, molseq, tail)
# --------------------------------------------------------------------
# Write
def write(blocks, outfile):
if isinstance(blocks, dict) and 'sequences' in blocks:
blocks = [blocks]
with sugar.maybe_open(outfile, 'w+') as outstream:
outstream.writelines(
itertools.chain(*map(_format_block, blocks)))
def _format_block(block):
# Block header
# [0_(1)=structs.seq(12){go=19,gx=2,pn=5.0,lf=0,rf=0}:
# (69)*****!***************************!!!******!******************!*******
yield """\
[{level}_({one})={name}({seqcount}){{{params}}}:
({query_length}){query_chars}
""".format(
seqcount=len(block['sequences']),
# fixed_params=','.join('{0}={1}'.format(key, val)
# for key, val in block['params'].iteritems()),
**block)
# Sequences
for s in _format_sequences(block['sequences'], block['query_length']):
yield s
# Block close
yield "_%s].\n" % block['level']
def _format_sequences(sequences, query_length):
# $1=254(255):
# >2QG5|A {|27(6)|}
# {()YTLENTIGRGSWGEVKIAVQKGTRIRRAAKKIPKYFV---EDVDRFKQEIEIMKSLDHPNIIRLYETFEDNTDIYLVMELCTGGELFERVVHKRVFRESDAARIMKDVLSAVAYCHKLNVAHRDLKPENFLFltdSPDSPLKLIDFGLAARFkpGKMMRTKVGTPYYVSPQVLEGL-YGPECDEWSAGVMMYVLLCGYPPFSAPTDXEVMLKIREGTFtfpeKDWLNVSPQAESLIRRLLTKSPKQRIT-----SLQALEHEW-()}*
for idx, seq in enumerate(sequences):
head_tail = ("|{head_len}({tail_len})|".format(**seq)
if seq['head_len'] or seq['tail_len'] else "")
taxonomy=("<{phylum}({taxchar})>".format(**seq)
if seq['phylum'] and seq['taxchar'] else "")
special_header = ("{{{0}{1}}}".format(head_tail, taxonomy)
if head_tail or taxonomy else "")
seq['index'] = idx + 1
yield """\
${index}={length}({0}):
>{id} {1}{description}
{head_seq}{{(){seq}()}}{tail_seq}*
""".format(query_length, special_header, **seq)
# --------------------------------------------------------------------
# Utilities
def realign_seqs(block, gap_char='.', align_indels=False):
"""Add gaps to a block so all residues in a column are equivalent.
Given a block, containing a list of "sequences" (dicts) each containing a
"seq" (actual string sequence, where upper=match, lower=insert, dash=gap),
insert gaps (- or .) into the sequences s.t.
1. columns line up properly, and
2. all resulting sequences have the same length
The reason this needs to be done is that the query/consensus sequence is not
assigned gaps to account for inserts in the other sequences. We need to add
the gaps back to obtain a normal alignment.
`return`: a list of realigned sequence strings.
"""
# ENH: align inserts using an external tool (if align_indels)
all_chars = [list(sq['seq']) for sq in block['sequences']]
# NB: If speed is an issue here, consider Numpy or Cython
# main problem: list.insert is O(n) -- would OrderedDict help?
nrows = len(all_chars)
i = 0
while i < len(all_chars[0]):
rows_need_gaps = [r for r in all_chars if not r[i].islower()]
if len(rows_need_gaps) != nrows:
for row in rows_need_gaps:
row.insert(i, gap_char)
i += 1
return [''.join(row) for row in all_chars]
def consensus2block(record, level=0, name=None):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Ungapping is handled here.
"""
cons_ungap = str(record.seq).replace('-', '').replace('.', '').upper()
record.seq = cons_ungap
return dict(
level=level, #record.annotations.get('level', 0),
one=1,
name=name or record.id,
params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
query_length=len(cons_ungap),
query_chars='*'*len(cons_ungap),
sequences=[seqrecord2sequence(record, len(cons_ungap), 1)]
)
def seqrecord2sequence(record, qlen, index):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Indels (gaps, casing) must have already been handled in the record.
"""
# aligned_len = sum(map(str.isupper, str(record.seq)))
# assert qlen == aligned_len, (
# "Aligned sequence length %s, query %s\n%s"
# % (aligned_len, qlen, str(record)))
description = (record.description.split(' ', 1)[1]
if ' ' in record.description
else ' ')
return dict(index=index,
id=record.id,
description=description,
dbxrefs={},
phylum='',
taxchar='',
head_len=None,
tail_len=None,
head_seq='',
tail_seq='',
length=len(record.seq) - record.seq.count('-'),
seq=str(record.seq),
)
def replace_asterisks(seq, label=None):
if '*' in seq:
logging.warn("Sequence %scontains '*' character; replacing with 'X'",
str(label) + ' ' if label else '')
return str(seq).replace('*', 'X')
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
"""Opposite of realign_seqs.
Input sequences should all be the same length.
The first record must be the consensus.
"""
level = 0
name = seqrecords[0].id
# If this is a CMA alignment, extract additional info:
if hasattr(seqrecords, '_records'):
if hasattr(seqrecords, 'level'):
level = seqrecords.level
if hasattr(seqrecords, 'name'):
name = seqrecords.name
seqrecords = seqrecords._records
consensus = seqrecords.pop(0)
cons_length = len(consensus)
for i, s in enumerate(seqrecords):
if len(s) != cons_length:
raise ValueError(
"Sequence #%d has length %d, consensus is %d"
% (i+2, len(s), cons_length))
if '.' in str(consensus.seq):
# Strict -- error if there's a '-'
if '-' in str(consensus.seq):
if strict:
raise ValueError("Consensus contains '-' gap characters")
logging.warn("Consensus sequence contains both '.' and '-' gap "
"characters -- is it really the consensus?")
aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
else:
aligned_cols = [c != '.' for c in str(consensus.seq)]
else:
# A little more ambiguous...
aligned_cols = [c != '-' for c in str(consensus.seq)]
consensus.seq = replace_asterisks(consensus.seq, 'consensus')
# Start a block with the consensus sequence
block = consensus2block(consensus, level=level, name=name)
qlen = block['query_length']
# Collapse & add remaining sequences to the block
for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords):
# Collapse rec.seq down to aligned size
new_mol_seq = []
is_beginning = True
for aligned_col, char in zip(aligned_cols,
replace_asterisks(rec.seq, index)):
if aligned_col:
is_beginning = False
if char in '-.':
# deletion
new_mol_seq.append('-')
else:
# aligned character
new_mol_seq.append(char.upper())
else:
# it's an insert or nothing
# (also, skip any left-side inserts)
if char not in '-.' and not is_beginning:
new_mol_seq.append(char.lower())
rec.seq = ''.join(new_mol_seq)
if do_iron:
rec.seq = iron(rec.seq)
block['sequences'].append(seqrecord2sequence(rec, qlen, index))
return block
def iron(sequence):
"""'Iron out' indel regions in the aligned sequence.
Any inserts next to deletions are converted to matches (uppercase).
Given a CMA string like:
AAAAbc--de-f--gAAA
Result:
AAAABCDEFgAAA
"""
r_indel = re.compile(r'(-[a-y]|[a-y]-)')
orig_sequence = sequence
while r_indel.search(sequence):
in_insert = False
in_gap = False
seen_gaps = 0
inserts = []
outchars = []
for char in sequence:
if in_insert:
if char.islower():
# Extend the insert
inserts.append(char)
elif char.isupper():
# Indel is over; 'iron' out & emit inserts, then gaps
in_insert = False
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Convert a preceding indel char to a 'match' (uppercase)
# If the indel and gap are both multiple chars, this will
# capitalize the insert left-to-right, then leave any gap
# remainer as-is.
assert char == '-'
if not inserts:
in_insert = False
in_gap = True
seen_gaps += 1
else:
outchars.append(inserts.pop(0).upper())
# NB: Only leave the insert region if we've finished
# converting all the insert chars
if not inserts:
in_insert = False
in_gap = True
elif in_gap:
if char.islower():
in_insert = True
in_gap = False
# If some inserts previously seen, emit them now
# If no inserts have been seen yet, we'll iron this indel
if inserts:
outchars.extend(inserts)
outchars.append('-' * seen_gaps)
seen_gaps = 0
inserts = [char]
elif char.isupper():
in_gap = False
# End of the gap -- emit
if inserts:
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Extend the gap
assert char == '-'
seen_gaps += 1
else:
assert not inserts and not seen_gaps, (
"Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
% (inserts, seen_gaps, sequence, in_insert, in_gap))
# Coming from Match state
if char.isupper():
# Extend the match
outchars.append(char)
elif char.islower():
inserts.append(char)
in_insert = True
else:
assert char == '-'
seen_gaps += 1
in_gap = True
# Emit any trailing indel
if inserts:
outchars.extend(inserts)
if seen_gaps:
outchars.append('-' * seen_gaps)
sequence = ''.join(outchars)
# logging.info(sequence)
assert (sequence.replace('-', '').upper()
==
orig_sequence.replace('-', '').upper()), \
'\nOrig: ' + orig_sequence + \
'\nIron: ' + sequence
return sequence
# --------------------------------------------------------------------
if __name__ == '__main__':
# Test
import sys
from .utils import get_equivalent_positions
cmafiles = sys.argv[1:]
if not cmafiles:
sys.exit("usage: cma.py <cmafile1> [<cmafile2> ...]")
for fname in cmafiles:
block = next(parse(fname))
print block['query_length'], "aa query"
print len(block['sequences']), "sequences"
print "of lengths:",
for seq in block['sequences']:
print "%d/%d(%d)" % (
len(seq['seq']), seq['length'], seq['seq'].count('-')),
print
print " Equivalencies:"
for idx in xrange(1, 20):
print idx, '=', get_equivalent_positions(block)[idx]
print
print " Writing the file back out:"
print
if len(block['sequences']) < 60:
write(block, sys.stdout)
|
etal/biocma | biocma/cma.py | _parse_sequences | python | def _parse_sequences(ilines, expect_qlen):
while True:
first = next(ilines)
if first.startswith('_') and first.endswith('].'):
# End of sequences & end of block
break
# ENH: handle wrapped lines?
try:
index, this_len, query_len = _parse_seq_preheader(first)
except ValueError:
logging.warn('Unparseable line (SKIPPING):\n%s', first)
continue
(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
) = _parse_seq_header(next(ilines))
try:
headseq, molseq, tailseq = _parse_seq_body(next(ilines))
except ValueError:
logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
continue
# Validation
if expect_qlen != query_len:
logging.warn("Query length in %s given as %d; expected %d",
rec_id, query_len, expect_qlen)
if not headseq and not headlen:
headlen = 0
if not tailseq and not taillen:
taillen = 0
if headseq:
if headlen is None:
headlen = len(headseq)
elif headlen != len(headseq):
logging.warn("Conflicting head flank lengths in %s: %d, %d",
rec_id, headlen, len(headseq))
if tailseq:
if taillen is None:
taillen = len(tailseq)
elif taillen != len(tailseq):
logging.warn("Conflicting tail flank lengths in %s: %d, %d",
rec_id, taillen, len(tailseq))
yield {'index': index,
'id': rec_id,
'description': description,
'dbxrefs': dbxrefs,
'phylum': phylum,
'taxchar': taxchar,
'head_len': headlen,
'tail_len': taillen,
'head_seq': headseq,
'tail_seq': tailseq,
'length': this_len,
'seq': molseq,
} | Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}* | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L78-L141 | null | #!/usr/bin/env python
"""Lower-level functionality for parsing CMA (.cma) files.
This handles .chn (CHAIN) files as a collection of CMA blocks, i.e. alignments.
"""
import itertools
import logging
import re
from . import sugar
def parse(infile):
with sugar.maybe_open(infile) as instream:
for block in _parse_blocks(instream):
yield block
read = sugar.make_reader(parse)
# Removed
# parse_raw = parse
# --------------------------------------------------------------------
# Parse the cma into raw chunks
# Flow:
# if START:
# parse line 1
# parse line 2
# pass control to _parse_sequences
#
# _parse_sequences:
# read a non-blank line;
# if END: return (w/ all sequences accumulated)
# else:
# read 2 more non-blank lines
# parse the 3 read lines & bundle up data
# yield a seq
#
def _parse_blocks(instream):
"""Parse an alignment block from the given file handle.
Block looks like:
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
(209)***********************************************...
... sequences, numbered 1-8 ...
_0].
"""
ilines = sugar.unblank(instream)
for line in ilines:
if line.startswith('['):
# Start of block
level, one, name, seqcount, params = _parse_block_header(line)
qlen, qchars = _parse_block_postheader(next(ilines))
# Pass control to the sequence parser
sequences = list(_parse_sequences(ilines, qlen))
# Validation
if not len(sequences) == seqcount:
logging.warn("Expected %d sequences in block %s, found %d",
seqcount, name, len(sequences))
yield {'level': level,
'one': one,
'name': name,
# 'seqcount': seqcount,
'params': params,
'query_length': qlen,
'query_chars': qchars,
'sequences': sequences,
}
# Microparsing
def _parse_block_header(line):
"""
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
"""
level = line[1]
one, _rest = line[4:].split(')=', 1)
name, _rest = _rest.split('(', 1)
seqcount, _rest = _rest.split(')', 1)
params = _rest.strip('{}:')
# try:
# params = dict((key, float(val))
# for key, val in (pair.split('=')
# for pair in _rest[1:-2].split(',')))
# except ValueError:
# # Couldn't convert params to key-val pairs, for whatever reason
# logging.warn("Failed to parse CMA params: %s", _rest[1:-2])
# params = {}
return int(level), int(one), name, int(seqcount), params
def _parse_block_postheader(line):
"""
(209)**************!*****************!!*************...
"""
parts = line[1:].split(')', 1)
qlen = int(parts[0])
if not len(parts[1]) == qlen:
logging.warn("postheader expected %d-long query, found %d",
qlen, len(parts[1]))
return qlen, parts[1]
def _parse_seq_preheader(line):
"""
$3=227(209):
"""
match = re.match(r"\$ (\d+) = (\d+) \( (\d+) \):", line, re.VERBOSE)
if not match:
raise ValueError("Unparseable header: " + line)
index, this_len, query_len = match.groups()
return map(int, (index, this_len, query_len))
def _parse_seq_header(line):
    """Unique ID, head/tail lengths and taxonomy info from a sequence header.

    The description is the part of the FASTA/CMA sequence header starting
    after the first space (i.e. excluding ID), to the end of the line.
    This function looks inside the first '{...}' pair to extract info.

    Ex:
        >consensus seq
        >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] ...
        >gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex ...

    Returns a tuple:
        (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description)
    where headlen/taillen are ints or None, and phylum/taxchar default to ''.
    """
    # ENH: use the two functions in esbglib.parseutils
    # or, move one or both of those functions into here
    _parts = line[1:].split(None, 1)
    rec_id = _parts[0]
    descr = _parts[1] if _parts[1:] else ''
    # Database cross references: pipe-delimited IDs are read as alternating
    # key/value pairs, e.g. 'gi|15606894|ref|NP_214275.1|' ->
    # {'gi': '15606894', 'ref': 'NP_214275.1'}.  An odd trailing key is
    # silently dropped (StopIteration).
    dbxrefs = {}
    if '|' in rec_id:
        id_gen = iter(rec_id.rstrip('|').split('|'))
        for key in id_gen:
            try:
                dbxrefs[key] = next(id_gen)
            except StopIteration:
                break
    # Head/tail lengths and taxonomy codes, both optional inside '{...}':
    # '|<head>(<tail>)|' and '<Phylum(T)>' respectively.
    headlen = taillen = None
    phylum = taxchar = ''
    if descr.startswith('{'):
        _deets, description = descr[1:].split('}', 1)
        match = re.search(r"""
            (?:
                \| (?P<headlen> \d+)
                \( (?P<taillen> \d+)
                \)
                \|
            )?
            (?:
                < (?P<phylum> .+?)
                \( (?P<taxchar> \w)
                \)
                >
            )?
            """, _deets, re.VERBOSE)
        if match:
            headlen, taillen, phylum, taxchar = match.groups()
            # Normalize: numeric fields become ints, taxonomy fields ''.
            if headlen is not None:
                headlen = int(headlen)
            if taillen is not None:
                taillen = int(taillen)
            if phylum is None:
                phylum = ''
            if taxchar is None:
                taxchar = ''
        else:
            # NOTE: both groups are optional, so re.search can only fail to
            # match on truly malformed '{...}' contents.
            logging.warn("Couldn't match head/tail: %s", _deets)
    else:
        description = descr
    # TODO - return a dictionary here, update it in _parse_sequences
    return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
def _parse_seq_body(line):
    """Split a sequence body line into (head, aligned_seq, tail).

    Ex:
        {()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
        MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
    Or:
        {(HY)ELPWVEKYR...

    The sequence fragments in parentheses represent N- or C-terminal flanking
    regions that are not part of the alignment block (I think). Most tools
    don't include them, but some do, apparently.
    """
    line = line.rstrip('*')
    if '{()' in line:
        # Empty flank marker: head is whatever text precedes the braces.
        head, _rest = line.split('{()', 1)
    else:
        # Match parens: flank is written inside '{(...)'.
        # NOTE(review): in this branch any text *before* '{(' is discarded
        # and head becomes the paren contents -- asymmetric with the branch
        # above; confirm this is the intended behavior.
        _rest = line.split('{(', 1)[1]
        head, _rest = _rest.split(')', 1)
    if '()}' in _rest:
        # Empty tail flank.
        molseq, tail = _rest.split('()}', 1)
    else:
        # Match parens: tail flank is inside '(...)}'.
        molseq, _rest = _rest.split('(', 1)
        tail = _rest.split(')}', 1)[0]
    return (head, molseq, tail)
# --------------------------------------------------------------------
# Write
def write(blocks, outfile):
    """Write one block, or an iterable of blocks, to *outfile* in CMA format.

    A single block dict (recognized by its 'sequences' key) is accepted as a
    convenience and wrapped in a one-element list.
    """
    if isinstance(blocks, dict) and 'sequences' in blocks:
        blocks = [blocks]
    with sugar.maybe_open(outfile, 'w+') as outstream:
        for block in blocks:
            outstream.writelines(_format_block(block))
def _format_block(block):
    """Render one block dict back into CMA text, yielding string chunks.

    The header template lines are intentionally flush-left inside the
    triple-quoted string: their text is emitted verbatim.
    """
    # Block header
    # [0_(1)=structs.seq(12){go=19,gx=2,pn=5.0,lf=0,rf=0}:
    # (69)*****!***************************!!!******!******************!*******
    yield """\
[{level}_({one})={name}({seqcount}){{{params}}}:
({query_length}){query_chars}
""".format(
        # seqcount is recomputed from the actual sequence list rather than
        # trusting any stale count stored on the block.
        seqcount=len(block['sequences']),
        # fixed_params=','.join('{0}={1}'.format(key, val)
        #                       for key, val in block['params'].iteritems()),
        **block)
    # Sequences
    for s in _format_sequences(block['sequences'], block['query_length']):
        yield s
    # Block close
    yield "_%s].\n" % block['level']
def _format_sequences(sequences, query_length):
    """Render each sequence dict as its three-line CMA representation.

    Emits, per sequence:
        $1=254(255):
        >2QG5|A {|27(6)|}
        {()YTLE...()}*

    NOTE: mutates each input dict, setting seq['index'] to its 1-based
    position in the list.
    """
    for idx, seq in enumerate(sequences):
        # Optional '|head(tail)|' flank-length annotation.
        head_tail = ("|{head_len}({tail_len})|".format(**seq)
                     if seq['head_len'] or seq['tail_len'] else "")
        # Optional '<Phylum(T)>' taxonomy annotation.
        taxonomy=("<{phylum}({taxchar})>".format(**seq)
                  if seq['phylum'] and seq['taxchar'] else "")
        # The braces wrapper only appears when either annotation is present.
        special_header = ("{{{0}{1}}}".format(head_tail, taxonomy)
                         if head_tail or taxonomy else "")
        seq['index'] = idx + 1
        yield """\
${index}={length}({0}):
>{id} {1}{description}
{head_seq}{{(){seq}()}}{tail_seq}*
""".format(query_length, special_header, **seq)
# --------------------------------------------------------------------
# Utilities
def realign_seqs(block, gap_char='.', align_indels=False):
    """Insert gaps into a block so all residues in a column are equivalent.

    Each entry of block['sequences'] carries a 'seq' string where uppercase
    means match, lowercase means insert, and '-' means gap.  Because the
    query/consensus row is stored without gaps for other rows' inserts,
    whenever any row shows a lowercase (insert) character in a column, every
    row that does not is padded with *gap_char* at that position -- yielding
    a conventional rectangular alignment.

    `align_indels` is currently unused (ENH: align inserts with an external
    tool when set).

    Returns a list of realigned sequence strings, one per input sequence.
    """
    rows = [list(entry['seq']) for entry in block['sequences']]
    # NB: If speed is an issue here, consider Numpy or Cython --
    # list.insert is O(n) per call.
    total_rows = len(rows)
    col = 0
    while col < len(rows[0]):
        non_insert_rows = [row for row in rows if not row[col].islower()]
        if len(non_insert_rows) != total_rows:
            # At least one row has an insert in this column; pad the others.
            for row in non_insert_rows:
                row.insert(col, gap_char)
        col += 1
    return [''.join(row) for row in rows]
def consensus2block(record, level=0, name=None):
    """Convert a Biopython SeqRecord (the consensus) into a CMA block dict.

    Gap characters ('-' and '.') are stripped and the sequence upper-cased;
    note that *record*.seq is overwritten in place with the ungapped string.
    The block starts with this consensus as its only sequence.
    """
    ungapped = str(record.seq).replace('-', '').replace('.', '').upper()
    record.seq = ungapped
    qlen = len(ungapped)
    return {
        'level': level,  # record.annotations.get('level', 0) in the past
        'one': 1,
        'name': name if name else record.id,
        'params': 'go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
        'query_length': qlen,
        'query_chars': '*' * qlen,
        'sequences': [seqrecord2sequence(record, qlen, 1)],
    }
def seqrecord2sequence(record, qlen, index):
    """Convert a Biopython SeqRecord into a CMA sequence dict.

    Indels (gaps, casing) must have already been handled in the record.
    The *qlen* parameter is kept for interface compatibility (it was once
    used to sanity-check the aligned length).
    """
    # Drop the leading ID token from the description; fall back to a single
    # space when there is nothing after the ID.
    if ' ' in record.description:
        description = record.description.split(' ', 1)[1]
    else:
        description = ' '
    return {
        'index': index,
        'id': record.id,
        'description': description,
        'dbxrefs': {},
        'phylum': '',
        'taxchar': '',
        'head_len': None,
        'tail_len': None,
        'head_seq': '',
        'tail_seq': '',
        # 'length' counts only non-gap characters.
        'length': len(record.seq) - record.seq.count('-'),
        'seq': str(record.seq),
    }
def replace_asterisks(seq, label=None):
    """Return str(seq) with every '*' replaced by 'X'.

    Logs a warning (tagged with *label*, if given) whenever a replacement
    actually occurs.
    """
    if '*' in seq:
        logging.warn("Sequence %scontains '*' character; replacing with 'X'",
                     str(label) + ' ' if label else '')
    result = str(seq)
    return result.replace('*', 'X')
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
    """Opposite of realign_seqs: build a CMA block from a flat alignment.

    Input sequences should all be the same length.
    The first record must be the consensus; it is removed from the list and
    becomes the block's query.  Columns where the consensus has a gap become
    lowercase inserts in the other rows; aligned columns are upper-cased.

    Args:
        seqrecords: list of SeqRecord-like objects (or a CMA alignment
            object carrying `_records`, and optionally `level`/`name`).
        strict: raise instead of warn when the consensus mixes '.' and '-'.
        do_iron: run iron() on each collapsed sequence to normalize indels.

    Returns the block dict.  Raises ValueError on length mismatch or (with
    strict=True) on a mixed-gap consensus.

    NOTE: Python 2 only as written (uses xrange); mutates the input list and
    the records' .seq attributes.
    """
    level = 0
    name = seqrecords[0].id
    # If this is a CMA alignment, extract additional info:
    if hasattr(seqrecords, '_records'):
        if hasattr(seqrecords, 'level'):
            level = seqrecords.level
        if hasattr(seqrecords, 'name'):
            name = seqrecords.name
        seqrecords = seqrecords._records
    consensus = seqrecords.pop(0)
    cons_length = len(consensus)
    # All remaining rows must match the consensus length exactly.
    for i, s in enumerate(seqrecords):
        if len(s) != cons_length:
            raise ValueError(
                "Sequence #%d has length %d, consensus is %d"
                % (i+2, len(s), cons_length))
    # Decide which gap character(s) mark unaligned columns in the consensus.
    if '.' in str(consensus.seq):
        # Strict -- error if there's a '-'
        if '-' in str(consensus.seq):
            if strict:
                raise ValueError("Consensus contains '-' gap characters")
            logging.warn("Consensus sequence contains both '.' and '-' gap "
                         "characters -- is it really the consensus?")
            aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
        else:
            aligned_cols = [c != '.' for c in str(consensus.seq)]
    else:
        # A little more ambiguous...
        aligned_cols = [c != '-' for c in str(consensus.seq)]
    consensus.seq = replace_asterisks(consensus.seq, 'consensus')
    # Start a block with the consensus sequence
    block = consensus2block(consensus, level=level, name=name)
    qlen = block['query_length']
    # Collapse & add remaining sequences to the block
    for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords):
        # Collapse rec.seq down to aligned size
        new_mol_seq = []
        is_beginning = True
        for aligned_col, char in zip(aligned_cols,
                                     replace_asterisks(rec.seq, index)):
            if aligned_col:
                is_beginning = False
                if char in '-.':
                    # deletion
                    new_mol_seq.append('-')
                else:
                    # aligned character
                    new_mol_seq.append(char.upper())
            else:
                # it's an insert or nothing
                # (also, skip any left-side inserts)
                if char not in '-.' and not is_beginning:
                    new_mol_seq.append(char.lower())
        rec.seq = ''.join(new_mol_seq)
        if do_iron:
            rec.seq = iron(rec.seq)
        block['sequences'].append(seqrecord2sequence(rec, qlen, index))
    return block
def iron(sequence):
    """'Iron out' indel regions in the aligned sequence.

    Any inserts next to deletions are converted to matches (uppercase).
    Given a CMA string like:
        AAAAbc--de-f--gAAA
    Result:
        AAAABCDEFgAAA

    Implemented as a three-state scan (match / insert / gap) repeated until
    no lowercase character remains adjacent to a '-' ('z' is excluded from
    the pattern; presumably reserved -- TODO confirm).  Each pass may only
    partially normalize (e.g. 'A-aB' -> 'Aa-B' -> 'AAB'), hence the loop.
    """
    r_indel = re.compile(r'(-[a-y]|[a-y]-)')
    orig_sequence = sequence
    while r_indel.search(sequence):
        in_insert = False
        in_gap = False
        seen_gaps = 0    # pending gap chars not yet emitted
        inserts = []     # pending insert chars not yet emitted
        outchars = []
        for char in sequence:
            if in_insert:
                if char.islower():
                    # Extend the insert
                    inserts.append(char)
                elif char.isupper():
                    # Indel is over; 'iron' out & emit inserts, then gaps
                    in_insert = False
                    outchars.extend(inserts)
                    inserts = []
                    outchars.append('-' * seen_gaps)
                    seen_gaps = 0
                    outchars.append(char)
                else:
                    # Convert a preceding indel char to a 'match' (uppercase)
                    # If the indel and gap are both multiple chars, this will
                    # capitalize the insert left-to-right, then leave any gap
                    # remainer as-is.
                    assert char == '-'
                    if not inserts:
                        in_insert = False
                        in_gap = True
                        seen_gaps += 1
                    else:
                        outchars.append(inserts.pop(0).upper())
                        # NB: Only leave the insert region if we've finished
                        # converting all the insert chars
                        if not inserts:
                            in_insert = False
                            in_gap = True
            elif in_gap:
                if char.islower():
                    in_insert = True
                    in_gap = False
                    # If some inserts previously seen, emit them now
                    # If no inserts have been seen yet, we'll iron this indel
                    if inserts:
                        outchars.extend(inserts)
                        outchars.append('-' * seen_gaps)
                        seen_gaps = 0
                    inserts = [char]
                elif char.isupper():
                    in_gap = False
                    # End of the gap -- emit
                    if inserts:
                        outchars.extend(inserts)
                        inserts = []
                    outchars.append('-' * seen_gaps)
                    seen_gaps = 0
                    outchars.append(char)
                else:
                    # Extend the gap
                    assert char == '-'
                    seen_gaps += 1
            else:
                assert not inserts and not seen_gaps, (
                    "Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
                    % (inserts, seen_gaps, sequence, in_insert, in_gap))
                # Coming from Match state
                if char.isupper():
                    # Extend the match
                    outchars.append(char)
                elif char.islower():
                    inserts.append(char)
                    in_insert = True
                else:
                    assert char == '-'
                    seen_gaps += 1
                    in_gap = True
        # Emit any trailing indel
        if inserts:
            outchars.extend(inserts)
        if seen_gaps:
            outchars.append('-' * seen_gaps)
        sequence = ''.join(outchars)
        # logging.info(sequence)
    # Sanity check: ironing must preserve the residues (gaps/case aside).
    assert (sequence.replace('-', '').upper()
            ==
            orig_sequence.replace('-', '').upper()), \
        '\nOrig: ' + orig_sequence + \
        '\nIron: ' + sequence
    return sequence
# --------------------------------------------------------------------
if __name__ == '__main__':
# Test
import sys
from .utils import get_equivalent_positions
cmafiles = sys.argv[1:]
if not cmafiles:
sys.exit("usage: cma.py <cmafile1> [<cmafile2> ...]")
for fname in cmafiles:
block = next(parse(fname))
print block['query_length'], "aa query"
print len(block['sequences']), "sequences"
print "of lengths:",
for seq in block['sequences']:
print "%d/%d(%d)" % (
len(seq['seq']), seq['length'], seq['seq'].count('-')),
print
print " Equivalencies:"
for idx in xrange(1, 20):
print idx, '=', get_equivalent_positions(block)[idx]
print
print " Writing the file back out:"
print
if len(block['sequences']) < 60:
write(block, sys.stdout)
|
etal/biocma | biocma/cma.py | _parse_block_header | python | def _parse_block_header(line):
level = line[1]
one, _rest = line[4:].split(')=', 1)
name, _rest = _rest.split('(', 1)
seqcount, _rest = _rest.split(')', 1)
params = _rest.strip('{}:')
# try:
# params = dict((key, float(val))
# for key, val in (pair.split('=')
# for pair in _rest[1:-2].split(',')))
# except ValueError:
# # Couldn't convert params to key-val pairs, for whatever reason
# logging.warn("Failed to parse CMA params: %s", _rest[1:-2])
# params = {}
return int(level), int(one), name, int(seqcount), params | [0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}: | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L146-L163 | null | #!/usr/bin/env python
"""Lower-level functionality for parsing CMA (.cma) files.
This handles .chn (CHAIN) files as a collection of CMA blocks, i.e. alignments.
"""
import itertools
import logging
import re
from . import sugar
def parse(infile):
with sugar.maybe_open(infile) as instream:
for block in _parse_blocks(instream):
yield block
read = sugar.make_reader(parse)
# Removed
# parse_raw = parse
# --------------------------------------------------------------------
# Parse the cma into raw chunks
# Flow:
# if START:
# parse line 1
# parse line 2
# pass control to _parse_sequences
#
# _parse_sequences:
# read a non-blank line;
# if END: return (w/ all sequences accumulated)
# else:
# read 2 more non-blank lines
# parse the 3 read lines & bundle up data
# yield a seq
#
def _parse_blocks(instream):
"""Parse an alignment block from the given file handle.
Block looks like:
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
(209)***********************************************...
... sequences, numbered 1-8 ...
_0].
"""
ilines = sugar.unblank(instream)
for line in ilines:
if line.startswith('['):
# Start of block
level, one, name, seqcount, params = _parse_block_header(line)
qlen, qchars = _parse_block_postheader(next(ilines))
# Pass control to the sequence parser
sequences = list(_parse_sequences(ilines, qlen))
# Validation
if not len(sequences) == seqcount:
logging.warn("Expected %d sequences in block %s, found %d",
seqcount, name, len(sequences))
yield {'level': level,
'one': one,
'name': name,
# 'seqcount': seqcount,
'params': params,
'query_length': qlen,
'query_chars': qchars,
'sequences': sequences,
}
def _parse_sequences(ilines, expect_qlen):
"""Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
"""
while True:
first = next(ilines)
if first.startswith('_') and first.endswith('].'):
# End of sequences & end of block
break
# ENH: handle wrapped lines?
try:
index, this_len, query_len = _parse_seq_preheader(first)
except ValueError:
logging.warn('Unparseable line (SKIPPING):\n%s', first)
continue
(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
) = _parse_seq_header(next(ilines))
try:
headseq, molseq, tailseq = _parse_seq_body(next(ilines))
except ValueError:
logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
continue
# Validation
if expect_qlen != query_len:
logging.warn("Query length in %s given as %d; expected %d",
rec_id, query_len, expect_qlen)
if not headseq and not headlen:
headlen = 0
if not tailseq and not taillen:
taillen = 0
if headseq:
if headlen is None:
headlen = len(headseq)
elif headlen != len(headseq):
logging.warn("Conflicting head flank lengths in %s: %d, %d",
rec_id, headlen, len(headseq))
if tailseq:
if taillen is None:
taillen = len(tailseq)
elif taillen != len(tailseq):
logging.warn("Conflicting tail flank lengths in %s: %d, %d",
rec_id, taillen, len(tailseq))
yield {'index': index,
'id': rec_id,
'description': description,
'dbxrefs': dbxrefs,
'phylum': phylum,
'taxchar': taxchar,
'head_len': headlen,
'tail_len': taillen,
'head_seq': headseq,
'tail_seq': tailseq,
'length': this_len,
'seq': molseq,
}
# Microparsing
def _parse_block_postheader(line):
"""
(209)**************!*****************!!*************...
"""
parts = line[1:].split(')', 1)
qlen = int(parts[0])
if not len(parts[1]) == qlen:
logging.warn("postheader expected %d-long query, found %d",
qlen, len(parts[1]))
return qlen, parts[1]
def _parse_seq_preheader(line):
"""
$3=227(209):
"""
match = re.match(r"\$ (\d+) = (\d+) \( (\d+) \):", line, re.VERBOSE)
if not match:
raise ValueError("Unparseable header: " + line)
index, this_len, query_len = match.groups()
return map(int, (index, this_len, query_len))
def _parse_seq_header(line):
"""Unique ID, head/tail lengths and taxonomy info from a sequence header.
The description is the part of the FASTA/CMA sequence header starting after
the first space (i.e. excluding ID), to the end of the line.
This function looks inside the first '{...}' pair to extract info.
Ex:
>consensus seq
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
>gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C
"""
# ENH: use the two functions in esbglib.parseutils
# or, move one or both of those functions into here
_parts = line[1:].split(None, 1)
rec_id = _parts[0]
descr = _parts[1] if _parts[1:] else ''
# Database cross references
dbxrefs = {}
if '|' in rec_id:
id_gen = iter(rec_id.rstrip('|').split('|'))
for key in id_gen:
try:
dbxrefs[key] = next(id_gen)
except StopIteration:
break
# Head/tail lengths and taxonomy codes
headlen = taillen = None
phylum = taxchar = ''
if descr.startswith('{'):
_deets, description = descr[1:].split('}', 1)
match = re.search(r"""
(?:
\| (?P<headlen> \d+)
\( (?P<taillen> \d+)
\)
\|
)?
(?:
< (?P<phylum> .+?)
\( (?P<taxchar> \w)
\)
>
)?
""", _deets, re.VERBOSE)
if match:
headlen, taillen, phylum, taxchar = match.groups()
if headlen is not None:
headlen = int(headlen)
if taillen is not None:
taillen = int(taillen)
if phylum is None:
phylum = ''
if taxchar is None:
taxchar = ''
else:
logging.warn("Couldn't match head/tail: %s", _deets)
else:
description = descr
# TODO - return a dictionary here, update it in _parse_sequences
return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
def _parse_seq_body(line):
"""
Ex:
{()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
Or:
{(HY)ELPWVEKYR...
The sequence fragments in parentheses represent N- or C-terminal flanking
regions that are not part of the alignment block (I think). Most tools don't
include them, but some do, apparently.
"""
line = line.rstrip('*')
if '{()' in line:
head, _rest = line.split('{()', 1)
else:
# Match parens
_rest = line.split('{(', 1)[1]
head, _rest = _rest.split(')', 1)
if '()}' in _rest:
molseq, tail = _rest.split('()}', 1)
else:
# Match parens
molseq, _rest = _rest.split('(', 1)
tail = _rest.split(')}', 1)[0]
return (head, molseq, tail)
# --------------------------------------------------------------------
# Write
def write(blocks, outfile):
if isinstance(blocks, dict) and 'sequences' in blocks:
blocks = [blocks]
with sugar.maybe_open(outfile, 'w+') as outstream:
outstream.writelines(
itertools.chain(*map(_format_block, blocks)))
def _format_block(block):
# Block header
# [0_(1)=structs.seq(12){go=19,gx=2,pn=5.0,lf=0,rf=0}:
# (69)*****!***************************!!!******!******************!*******
yield """\
[{level}_({one})={name}({seqcount}){{{params}}}:
({query_length}){query_chars}
""".format(
seqcount=len(block['sequences']),
# fixed_params=','.join('{0}={1}'.format(key, val)
# for key, val in block['params'].iteritems()),
**block)
# Sequences
for s in _format_sequences(block['sequences'], block['query_length']):
yield s
# Block close
yield "_%s].\n" % block['level']
def _format_sequences(sequences, query_length):
# $1=254(255):
# >2QG5|A {|27(6)|}
# {()YTLENTIGRGSWGEVKIAVQKGTRIRRAAKKIPKYFV---EDVDRFKQEIEIMKSLDHPNIIRLYETFEDNTDIYLVMELCTGGELFERVVHKRVFRESDAARIMKDVLSAVAYCHKLNVAHRDLKPENFLFltdSPDSPLKLIDFGLAARFkpGKMMRTKVGTPYYVSPQVLEGL-YGPECDEWSAGVMMYVLLCGYPPFSAPTDXEVMLKIREGTFtfpeKDWLNVSPQAESLIRRLLTKSPKQRIT-----SLQALEHEW-()}*
for idx, seq in enumerate(sequences):
head_tail = ("|{head_len}({tail_len})|".format(**seq)
if seq['head_len'] or seq['tail_len'] else "")
taxonomy=("<{phylum}({taxchar})>".format(**seq)
if seq['phylum'] and seq['taxchar'] else "")
special_header = ("{{{0}{1}}}".format(head_tail, taxonomy)
if head_tail or taxonomy else "")
seq['index'] = idx + 1
yield """\
${index}={length}({0}):
>{id} {1}{description}
{head_seq}{{(){seq}()}}{tail_seq}*
""".format(query_length, special_header, **seq)
# --------------------------------------------------------------------
# Utilities
def realign_seqs(block, gap_char='.', align_indels=False):
"""Add gaps to a block so all residues in a column are equivalent.
Given a block, containing a list of "sequences" (dicts) each containing a
"seq" (actual string sequence, where upper=match, lower=insert, dash=gap),
insert gaps (- or .) into the sequences s.t.
1. columns line up properly, and
2. all resulting sequences have the same length
The reason this needs to be done is that the query/consensus sequence is not
assigned gaps to account for inserts in the other sequences. We need to add
the gaps back to obtain a normal alignment.
`return`: a list of realigned sequence strings.
"""
# ENH: align inserts using an external tool (if align_indels)
all_chars = [list(sq['seq']) for sq in block['sequences']]
# NB: If speed is an issue here, consider Numpy or Cython
# main problem: list.insert is O(n) -- would OrderedDict help?
nrows = len(all_chars)
i = 0
while i < len(all_chars[0]):
rows_need_gaps = [r for r in all_chars if not r[i].islower()]
if len(rows_need_gaps) != nrows:
for row in rows_need_gaps:
row.insert(i, gap_char)
i += 1
return [''.join(row) for row in all_chars]
def consensus2block(record, level=0, name=None):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Ungapping is handled here.
"""
cons_ungap = str(record.seq).replace('-', '').replace('.', '').upper()
record.seq = cons_ungap
return dict(
level=level, #record.annotations.get('level', 0),
one=1,
name=name or record.id,
params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
query_length=len(cons_ungap),
query_chars='*'*len(cons_ungap),
sequences=[seqrecord2sequence(record, len(cons_ungap), 1)]
)
def seqrecord2sequence(record, qlen, index):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Indels (gaps, casing) must have already been handled in the record.
"""
# aligned_len = sum(map(str.isupper, str(record.seq)))
# assert qlen == aligned_len, (
# "Aligned sequence length %s, query %s\n%s"
# % (aligned_len, qlen, str(record)))
description = (record.description.split(' ', 1)[1]
if ' ' in record.description
else ' ')
return dict(index=index,
id=record.id,
description=description,
dbxrefs={},
phylum='',
taxchar='',
head_len=None,
tail_len=None,
head_seq='',
tail_seq='',
length=len(record.seq) - record.seq.count('-'),
seq=str(record.seq),
)
def replace_asterisks(seq, label=None):
if '*' in seq:
logging.warn("Sequence %scontains '*' character; replacing with 'X'",
str(label) + ' ' if label else '')
return str(seq).replace('*', 'X')
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
"""Opposite of realign_seqs.
Input sequences should all be the same length.
The first record must be the consensus.
"""
level = 0
name = seqrecords[0].id
# If this is a CMA alignment, extract additional info:
if hasattr(seqrecords, '_records'):
if hasattr(seqrecords, 'level'):
level = seqrecords.level
if hasattr(seqrecords, 'name'):
name = seqrecords.name
seqrecords = seqrecords._records
consensus = seqrecords.pop(0)
cons_length = len(consensus)
for i, s in enumerate(seqrecords):
if len(s) != cons_length:
raise ValueError(
"Sequence #%d has length %d, consensus is %d"
% (i+2, len(s), cons_length))
if '.' in str(consensus.seq):
# Strict -- error if there's a '-'
if '-' in str(consensus.seq):
if strict:
raise ValueError("Consensus contains '-' gap characters")
logging.warn("Consensus sequence contains both '.' and '-' gap "
"characters -- is it really the consensus?")
aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
else:
aligned_cols = [c != '.' for c in str(consensus.seq)]
else:
# A little more ambiguous...
aligned_cols = [c != '-' for c in str(consensus.seq)]
consensus.seq = replace_asterisks(consensus.seq, 'consensus')
# Start a block with the consensus sequence
block = consensus2block(consensus, level=level, name=name)
qlen = block['query_length']
# Collapse & add remaining sequences to the block
for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords):
# Collapse rec.seq down to aligned size
new_mol_seq = []
is_beginning = True
for aligned_col, char in zip(aligned_cols,
replace_asterisks(rec.seq, index)):
if aligned_col:
is_beginning = False
if char in '-.':
# deletion
new_mol_seq.append('-')
else:
# aligned character
new_mol_seq.append(char.upper())
else:
# it's an insert or nothing
# (also, skip any left-side inserts)
if char not in '-.' and not is_beginning:
new_mol_seq.append(char.lower())
rec.seq = ''.join(new_mol_seq)
if do_iron:
rec.seq = iron(rec.seq)
block['sequences'].append(seqrecord2sequence(rec, qlen, index))
return block
def iron(sequence):
"""'Iron out' indel regions in the aligned sequence.
Any inserts next to deletions are converted to matches (uppercase).
Given a CMA string like:
AAAAbc--de-f--gAAA
Result:
AAAABCDEFgAAA
"""
r_indel = re.compile(r'(-[a-y]|[a-y]-)')
orig_sequence = sequence
while r_indel.search(sequence):
in_insert = False
in_gap = False
seen_gaps = 0
inserts = []
outchars = []
for char in sequence:
if in_insert:
if char.islower():
# Extend the insert
inserts.append(char)
elif char.isupper():
# Indel is over; 'iron' out & emit inserts, then gaps
in_insert = False
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Convert a preceding indel char to a 'match' (uppercase)
# If the indel and gap are both multiple chars, this will
# capitalize the insert left-to-right, then leave any gap
# remainer as-is.
assert char == '-'
if not inserts:
in_insert = False
in_gap = True
seen_gaps += 1
else:
outchars.append(inserts.pop(0).upper())
# NB: Only leave the insert region if we've finished
# converting all the insert chars
if not inserts:
in_insert = False
in_gap = True
elif in_gap:
if char.islower():
in_insert = True
in_gap = False
# If some inserts previously seen, emit them now
# If no inserts have been seen yet, we'll iron this indel
if inserts:
outchars.extend(inserts)
outchars.append('-' * seen_gaps)
seen_gaps = 0
inserts = [char]
elif char.isupper():
in_gap = False
# End of the gap -- emit
if inserts:
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Extend the gap
assert char == '-'
seen_gaps += 1
else:
assert not inserts and not seen_gaps, (
"Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
% (inserts, seen_gaps, sequence, in_insert, in_gap))
# Coming from Match state
if char.isupper():
# Extend the match
outchars.append(char)
elif char.islower():
inserts.append(char)
in_insert = True
else:
assert char == '-'
seen_gaps += 1
in_gap = True
# Emit any trailing indel
if inserts:
outchars.extend(inserts)
if seen_gaps:
outchars.append('-' * seen_gaps)
sequence = ''.join(outchars)
# logging.info(sequence)
assert (sequence.replace('-', '').upper()
==
orig_sequence.replace('-', '').upper()), \
'\nOrig: ' + orig_sequence + \
'\nIron: ' + sequence
return sequence
# --------------------------------------------------------------------
if __name__ == '__main__':
# Test
import sys
from .utils import get_equivalent_positions
cmafiles = sys.argv[1:]
if not cmafiles:
sys.exit("usage: cma.py <cmafile1> [<cmafile2> ...]")
for fname in cmafiles:
block = next(parse(fname))
print block['query_length'], "aa query"
print len(block['sequences']), "sequences"
print "of lengths:",
for seq in block['sequences']:
print "%d/%d(%d)" % (
len(seq['seq']), seq['length'], seq['seq'].count('-')),
print
print " Equivalencies:"
for idx in xrange(1, 20):
print idx, '=', get_equivalent_positions(block)[idx]
print
print " Writing the file back out:"
print
if len(block['sequences']) < 60:
write(block, sys.stdout)
|
etal/biocma | biocma/cma.py | _parse_block_postheader | python | def _parse_block_postheader(line):
parts = line[1:].split(')', 1)
qlen = int(parts[0])
if not len(parts[1]) == qlen:
logging.warn("postheader expected %d-long query, found %d",
qlen, len(parts[1]))
return qlen, parts[1] | (209)**************!*****************!!*************... | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L166-L175 | null | #!/usr/bin/env python
"""Lower-level functionality for parsing CMA (.cma) files.
This handles .chn (CHAIN) files as a collection of CMA blocks, i.e. alignments.
"""
import itertools
import logging
import re
from . import sugar
def parse(infile):
with sugar.maybe_open(infile) as instream:
for block in _parse_blocks(instream):
yield block
read = sugar.make_reader(parse)
# Removed
# parse_raw = parse
# --------------------------------------------------------------------
# Parse the cma into raw chunks
# Flow:
# if START:
# parse line 1
# parse line 2
# pass control to _parse_sequences
#
# _parse_sequences:
# read a non-blank line;
# if END: return (w/ all sequences accumulated)
# else:
# read 2 more non-blank lines
# parse the 3 read lines & bundle up data
# yield a seq
#
def _parse_blocks(instream):
"""Parse an alignment block from the given file handle.
Block looks like:
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
(209)***********************************************...
... sequences, numbered 1-8 ...
_0].
"""
ilines = sugar.unblank(instream)
for line in ilines:
if line.startswith('['):
# Start of block
level, one, name, seqcount, params = _parse_block_header(line)
qlen, qchars = _parse_block_postheader(next(ilines))
# Pass control to the sequence parser
sequences = list(_parse_sequences(ilines, qlen))
# Validation
if not len(sequences) == seqcount:
logging.warn("Expected %d sequences in block %s, found %d",
seqcount, name, len(sequences))
yield {'level': level,
'one': one,
'name': name,
# 'seqcount': seqcount,
'params': params,
'query_length': qlen,
'query_chars': qchars,
'sequences': sequences,
}
def _parse_sequences(ilines, expect_qlen):
    """Parse the sequences in the current block (generator of dicts).

    Each sequence occupies three lines::

        $3=227(209):
        >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [...]
        {()YVPFARKYRPKFFREVIGQ...ASVYGE()}*

    Stops at the block terminator "_N].".  Entries whose pre-header or
    body cannot be parsed are logged and skipped.
    """
    while True:
        first = next(ilines)
        if first.startswith('_') and first.endswith('].'):
            # End of sequences & end of block
            break
        # ENH: handle wrapped lines?
        try:
            index, this_len, query_len = _parse_seq_preheader(first)
        except ValueError:
            logging.warn('Unparseable line (SKIPPING):\n%s', first)
            continue
        (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
        ) = _parse_seq_header(next(ilines))
        try:
            headseq, molseq, tailseq = _parse_seq_body(next(ilines))
        except ValueError:
            logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
            continue
        # Validation: every entry repeats the block-wide query length.
        if expect_qlen != query_len:
            logging.warn("Query length in %s given as %d; expected %d",
                         rec_id, query_len, expect_qlen)
        # Reconcile flank lengths declared in the header with the actual
        # flanking sequences; either source may be missing.
        if not headseq and not headlen:
            headlen = 0
        if not tailseq and not taillen:
            taillen = 0
        if headseq:
            if headlen is None:
                headlen = len(headseq)
            elif headlen != len(headseq):
                logging.warn("Conflicting head flank lengths in %s: %d, %d",
                             rec_id, headlen, len(headseq))
        if tailseq:
            if taillen is None:
                taillen = len(tailseq)
            elif taillen != len(tailseq):
                logging.warn("Conflicting tail flank lengths in %s: %d, %d",
                             rec_id, taillen, len(tailseq))
        yield {'index': index,
               'id': rec_id,
               'description': description,
               'dbxrefs': dbxrefs,
               'phylum': phylum,
               'taxchar': taxchar,
               'head_len': headlen,
               'tail_len': taillen,
               'head_seq': headseq,
               'tail_seq': tailseq,
               'length': this_len,
               'seq': molseq,
               }
# Microparsing
def _parse_block_header(line):
"""
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
"""
level = line[1]
one, _rest = line[4:].split(')=', 1)
name, _rest = _rest.split('(', 1)
seqcount, _rest = _rest.split(')', 1)
params = _rest.strip('{}:')
# try:
# params = dict((key, float(val))
# for key, val in (pair.split('=')
# for pair in _rest[1:-2].split(',')))
# except ValueError:
# # Couldn't convert params to key-val pairs, for whatever reason
# logging.warn("Failed to parse CMA params: %s", _rest[1:-2])
# params = {}
return int(level), int(one), name, int(seqcount), params
def _parse_seq_preheader(line):
"""
$3=227(209):
"""
match = re.match(r"\$ (\d+) = (\d+) \( (\d+) \):", line, re.VERBOSE)
if not match:
raise ValueError("Unparseable header: " + line)
index, this_len, query_len = match.groups()
return map(int, (index, this_len, query_len))
def _parse_seq_header(line):
    """Unique ID, head/tail lengths and taxonomy info from a sequence header.

    The description is the part of the FASTA/CMA sequence header starting
    after the first space (i.e. excluding the ID), to the end of the line.
    This function looks inside the first '{...}' pair to extract the
    optional head/tail flank lengths and phylum/taxon annotations.

    Ex::

        >consensus seq
        >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [...]
        >gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex [...]

    Returns the 7-tuple
    ``(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description)``.
    """
    # ENH: use the two functions in esbglib.parseutils
    # or, move one or both of those functions into here
    _parts = line[1:].split(None, 1)
    rec_id = _parts[0]
    descr = _parts[1] if _parts[1:] else ''
    # Database cross references: pipe-separated ID is treated as
    # alternating key|value pairs (NCBI style, e.g. gi|12345|ref|NP_1.1|).
    dbxrefs = {}
    if '|' in rec_id:
        id_gen = iter(rec_id.rstrip('|').split('|'))
        for key in id_gen:
            try:
                dbxrefs[key] = next(id_gen)
            except StopIteration:
                # Odd number of fields -- last key has no value; drop it.
                break
    # Head/tail lengths and taxonomy codes
    headlen = taillen = None
    phylum = taxchar = ''
    if descr.startswith('{'):
        _deets, description = descr[1:].split('}', 1)
        match = re.search(r"""
            (?:
                \| (?P<headlen> \d+)
                \( (?P<taillen> \d+)
                \)
                \|
            )?
            (?:
                < (?P<phylum> .+?)
                \( (?P<taxchar> \w)
                \)
                >
            )?
            """, _deets, re.VERBOSE)
        if match:
            headlen, taillen, phylum, taxchar = match.groups()
            if headlen is not None:
                headlen = int(headlen)
            if taillen is not None:
                taillen = int(taillen)
            if phylum is None:
                phylum = ''
            if taxchar is None:
                taxchar = ''
        else:
            # NOTE(review): both regex groups are optional, so re.search
            # always matches (possibly emptily) -- this branch looks
            # unreachable; confirm before relying on the warning.
            logging.warn("Couldn't match head/tail: %s", _deets)
    else:
        description = descr
    # TODO - return a dictionary here, update it in _parse_sequences
    return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
def _parse_seq_body(line):
"""
Ex:
{()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
Or:
{(HY)ELPWVEKYR...
The sequence fragments in parentheses represent N- or C-terminal flanking
regions that are not part of the alignment block (I think). Most tools don't
include them, but some do, apparently.
"""
line = line.rstrip('*')
if '{()' in line:
head, _rest = line.split('{()', 1)
else:
# Match parens
_rest = line.split('{(', 1)[1]
head, _rest = _rest.split(')', 1)
if '()}' in _rest:
molseq, tail = _rest.split('()}', 1)
else:
# Match parens
molseq, _rest = _rest.split('(', 1)
tail = _rest.split(')}', 1)[0]
return (head, molseq, tail)
# --------------------------------------------------------------------
# Write
def write(blocks, outfile):
    """Write *blocks* (a single block dict, or a list of them) to *outfile*."""
    # Accept a bare block dict for convenience.
    if isinstance(blocks, dict) and 'sequences' in blocks:
        blocks = [blocks]
    with sugar.maybe_open(outfile, 'w+') as handle:
        handle.writelines(
            itertools.chain.from_iterable(_format_block(b) for b in blocks))
def _format_block(block):
    """Render one block dict as CMA-formatted text chunks (generator).

    Emits the block header, then each formatted sequence, then the
    "_N]." block terminator.
    """
    # Block header, e.g.:
    # [0_(1)=structs.seq(12){go=19,gx=2,pn=5.0,lf=0,rf=0}:
    # (69)*****!***************************!!!******!******************!*******
    yield """\
[{level}_({one})={name}({seqcount}){{{params}}}:
({query_length}){query_chars}
""".format(
        # seqcount is recomputed from the sequence list, not taken from
        # the (possibly stale) block dict.
        seqcount=len(block['sequences']),
        # fixed_params=','.join('{0}={1}'.format(key, val)
        #     for key, val in block['params'].iteritems()),
        **block)
    # Sequences
    for s in _format_sequences(block['sequences'], block['query_length']):
        yield s
    # Block close
    yield "_%s].\n" % block['level']
def _format_sequences(sequences, query_length):
    """Render each sequence dict as one three-line CMA entry (generator).

    Output per sequence, e.g.::

        $1=254(255):
        >2QG5|A {|27(6)|}
        {()YTLENTIGRG...EHEW-()}*
    """
    for idx, seq in enumerate(sequences):
        # Optional "{|head(tail)|<Phylum(T)>}" annotation before the description.
        head_tail = ("|{head_len}({tail_len})|".format(**seq)
                     if seq['head_len'] or seq['tail_len'] else "")
        taxonomy = ("<{phylum}({taxchar})>".format(**seq)
                    if seq['phylum'] and seq['taxchar'] else "")
        special_header = ("{{{0}{1}}}".format(head_tail, taxonomy)
                         if head_tail or taxonomy else "")
        # Renumber sequentially from 1 -- NOTE: mutates the input dicts.
        seq['index'] = idx + 1
        yield """\
${index}={length}({0}):
>{id} {1}{description}
{head_seq}{{(){seq}()}}{tail_seq}*
""".format(query_length, special_header, **seq)
# --------------------------------------------------------------------
# Utilities
def realign_seqs(block, gap_char='.', align_indels=False):
    """Add gaps to a block so all residues in a column are equivalent.

    *block* contains "sequences" (dicts), each with a "seq" string where
    uppercase = match, lowercase = insert, '-' = gap.  The query/consensus
    is never assigned gaps for the other sequences' inserts, so this pads
    every row with *gap_char* wherever any row has an insert, producing a
    conventional rectangular alignment.

    ``align_indels`` is accepted but currently unused (ENH: align inserts
    with an external tool).

    Returns the list of realigned sequence strings.
    """
    rows = [list(entry['seq']) for entry in block['sequences']]
    # NB: If speed is an issue here, consider Numpy or Cython
    # main problem: list.insert is O(n) -- would OrderedDict help?
    row_count = len(rows)
    col = 0
    while col < len(rows[0]):
        # Rows whose character at this column is NOT a lowercase insert.
        non_insert_rows = [row for row in rows if not row[col].islower()]
        if len(non_insert_rows) != row_count:
            # At least one row has an insert here: pad the others.
            for row in non_insert_rows:
                row.insert(col, gap_char)
        col += 1
    return [''.join(row) for row in rows]
def consensus2block(record, level=0, name=None):
    """Convert a Biopython SeqRecord to a esbglib.cma block.

    Ungapping is handled here.  NOTE: mutates ``record.seq`` in place,
    replacing it with the ungapped, uppercased string before the record
    is formatted as the block's first (consensus) sequence.
    """
    cons_ungap = str(record.seq).replace('-', '').replace('.', '').upper()
    record.seq = cons_ungap
    return dict(
        level=level, #record.annotations.get('level', 0),
        one=1,
        name=name or record.id,
        # Fixed default chain parameters -- presumably fa2cma defaults;
        # TODO confirm against the CHAIN documentation.
        params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
        query_length=len(cons_ungap),
        query_chars='*'*len(cons_ungap),
        sequences=[seqrecord2sequence(record, len(cons_ungap), 1)]
    )
def seqrecord2sequence(record, qlen, index):
    """Convert a Biopython SeqRecord to a cma sequence dict.

    Indels (gaps, casing) must already be encoded in ``record.seq``.
    *qlen* is accepted for signature compatibility; the dict fields are
    derived from the record alone.  ``length`` counts non-gap characters.
    """
    if ' ' in record.description:
        # Drop the leading ID token; keep the rest verbatim.
        description = record.description.split(' ', 1)[1]
    else:
        description = ' '
    return dict(index=index,
                id=record.id,
                description=description,
                dbxrefs={},
                phylum='',
                taxchar='',
                head_len=None,
                tail_len=None,
                head_seq='',
                tail_seq='',
                length=len(record.seq) - record.seq.count('-'),
                seq=str(record.seq),
                )
def replace_asterisks(seq, label=None):
    """Return *seq* as a string with every '*' replaced by 'X'.

    Logs a warning (prefixed with *label*, when given) only when a
    replacement actually occurs.
    """
    if '*' in seq:
        # logging.warning(): .warn() is a deprecated alias of .warning().
        logging.warning("Sequence %scontains '*' character; replacing with 'X'",
                        str(label) + ' ' if label else '')
    return str(seq).replace('*', 'X')
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
    """Collapse a gapped alignment onto its consensus (opposite of realign_seqs).

    *seqrecords* is a list of SeqRecord-like objects, all the same length;
    the FIRST record must be the consensus.  Columns where the consensus
    has a residue become uppercase matches (or '-' deletions) in the other
    sequences; columns where it has a gap become lowercase inserts.
    Leading (left-side) inserts are dropped.

    NOTE: mutates the input -- pops the consensus off the list and
    rewrites each record's ``seq``.

    Raises:
        ValueError: if any sequence's length differs from the consensus,
            or (with ``strict=True``) the consensus mixes '.' and '-'.

    Returns a block dict suitable for `write`.
    """
    level = 0
    name = seqrecords[0].id
    # A cma-Alignment-style wrapper carries extra attributes; unwrap it.
    if hasattr(seqrecords, '_records'):
        if hasattr(seqrecords, 'level'):
            level = seqrecords.level
        if hasattr(seqrecords, 'name'):
            name = seqrecords.name
        seqrecords = seqrecords._records
    consensus = seqrecords.pop(0)
    cons_length = len(consensus)
    for i, s in enumerate(seqrecords):
        if len(s) != cons_length:
            raise ValueError(
                "Sequence #%d has length %d, consensus is %d"
                % (i + 2, len(s), cons_length))
    # Decide which columns count as "aligned", based on the consensus gaps.
    if '.' in str(consensus.seq):
        # Strict -- error if there's a '-'
        if '-' in str(consensus.seq):
            if strict:
                raise ValueError("Consensus contains '-' gap characters")
            logging.warning("Consensus sequence contains both '.' and '-' gap "
                            "characters -- is it really the consensus?")
            aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
        else:
            aligned_cols = [c != '.' for c in str(consensus.seq)]
    else:
        # A little more ambiguous...
        aligned_cols = [c != '-' for c in str(consensus.seq)]
    consensus.seq = replace_asterisks(consensus.seq, 'consensus')
    # Start a block with the consensus sequence
    block = consensus2block(consensus, level=level, name=name)
    qlen = block['query_length']
    # Collapse & add the remaining sequences to the block.
    # enumerate(..., 2) replaces the Py2-only zip(xrange(2, ...), ...),
    # which raises NameError under Python 3.
    for index, rec in enumerate(seqrecords, 2):
        new_mol_seq = []
        is_beginning = True
        for aligned_col, char in zip(aligned_cols,
                                     replace_asterisks(rec.seq, index)):
            if aligned_col:
                is_beginning = False
                if char in '-.':
                    # deletion
                    new_mol_seq.append('-')
                else:
                    # aligned character
                    new_mol_seq.append(char.upper())
            else:
                # it's an insert or nothing
                # (also, skip any left-side inserts)
                if char not in '-.' and not is_beginning:
                    new_mol_seq.append(char.lower())
        rec.seq = ''.join(new_mol_seq)
        if do_iron:
            rec.seq = iron(rec.seq)
        block['sequences'].append(seqrecord2sequence(rec, qlen, index))
    return block
def iron(sequence):
    """'Iron out' indel regions in the aligned sequence.

    Any inserts next to deletions are converted to matches (uppercase).

    Given a CMA string like:
        AAAAbc--de-f--gAAA
    Result:
        AAAABCDEFgAAA

    The final assertion guarantees the non-gap residues are preserved
    (only casing and gap placement change).
    """
    # An "indel" here is a lowercase insert directly adjacent to a '-' gap.
    # NOTE(review): the class is [a-y], so a lowercase 'z' never triggers
    # ironing -- presumably deliberate (ambiguity code)?  Confirm.
    r_indel = re.compile(r'(-[a-y]|[a-y]-)')
    orig_sequence = sequence
    # Repeat full passes until no insert touches a gap; each pass is a
    # three-state scan (match / insert / gap) over the string.
    while r_indel.search(sequence):
        in_insert = False   # inside a lowercase insert run
        in_gap = False      # inside a '-' gap run
        seen_gaps = 0       # pending gap chars not yet emitted
        inserts = []        # pending insert chars not yet emitted
        outchars = []       # output accumulator for this pass
        for char in sequence:
            if in_insert:
                if char.islower():
                    # Extend the insert
                    inserts.append(char)
                elif char.isupper():
                    # Indel is over; 'iron' out & emit inserts, then gaps
                    in_insert = False
                    outchars.extend(inserts)
                    inserts = []
                    outchars.append('-' * seen_gaps)
                    seen_gaps = 0
                    outchars.append(char)
                else:
                    # Convert a preceding indel char to a 'match' (uppercase)
                    # If the indel and gap are both multiple chars, this will
                    # capitalize the insert left-to-right, then leave any gap
                    # remainer as-is.
                    assert char == '-'
                    if not inserts:
                        in_insert = False
                        in_gap = True
                        seen_gaps += 1
                    else:
                        outchars.append(inserts.pop(0).upper())
                        # NB: Only leave the insert region if we've finished
                        # converting all the insert chars
                        if not inserts:
                            in_insert = False
                            in_gap = True
            elif in_gap:
                if char.islower():
                    in_insert = True
                    in_gap = False
                    # If some inserts previously seen, emit them now
                    # If no inserts have been seen yet, we'll iron this indel
                    if inserts:
                        outchars.extend(inserts)
                        outchars.append('-' * seen_gaps)
                        seen_gaps = 0
                    inserts = [char]
                elif char.isupper():
                    in_gap = False
                    # End of the gap -- emit
                    if inserts:
                        outchars.extend(inserts)
                        inserts = []
                    outchars.append('-' * seen_gaps)
                    seen_gaps = 0
                    outchars.append(char)
                else:
                    # Extend the gap
                    assert char == '-'
                    seen_gaps += 1
            else:
                assert not inserts and not seen_gaps, (
                    "Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
                    % (inserts, seen_gaps, sequence, in_insert, in_gap))
                # Coming from Match state
                if char.isupper():
                    # Extend the match
                    outchars.append(char)
                elif char.islower():
                    inserts.append(char)
                    in_insert = True
                else:
                    assert char == '-'
                    seen_gaps += 1
                    in_gap = True
        # Emit any trailing indel
        if inserts:
            outchars.extend(inserts)
        if seen_gaps:
            outchars.append('-' * seen_gaps)
        sequence = ''.join(outchars)
        # logging.info(sequence)
    assert (sequence.replace('-', '').upper()
            ==
            orig_sequence.replace('-', '').upper()), \
        '\nOrig: ' + orig_sequence + \
        '\nIron: ' + sequence
    return sequence
# --------------------------------------------------------------------
if __name__ == '__main__':
    # Smoke test: parse each CMA file named on the command line and dump
    # basic statistics.  NOTE: Python 2-only (print statements, xrange).
    import sys
    from .utils import get_equivalent_positions
    cmafiles = sys.argv[1:]
    if not cmafiles:
        sys.exit("usage: cma.py <cmafile1> [<cmafile2> ...]")
    for fname in cmafiles:
        # Only the first block of each file is inspected.
        block = next(parse(fname))
        print block['query_length'], "aa query"
        print len(block['sequences']), "sequences"
        print "of lengths:",
        for seq in block['sequences']:
            print "%d/%d(%d)" % (
                len(seq['seq']), seq['length'], seq['seq'].count('-')),
        print
        print " Equivalencies:"
        for idx in xrange(1, 20):
            print idx, '=', get_equivalent_positions(block)[idx]
        print
        print " Writing the file back out:"
        print
        # Round-trip check (skipped for very large blocks).
        if len(block['sequences']) < 60:
            write(block, sys.stdout)
etal/biocma | biocma/cma.py | _parse_seq_preheader | python | def _parse_seq_preheader(line):
match = re.match(r"\$ (\d+) = (\d+) \( (\d+) \):", line, re.VERBOSE)
if not match:
raise ValueError("Unparseable header: " + line)
index, this_len, query_len = match.groups()
return map(int, (index, this_len, query_len)) | $3=227(209): | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L178-L186 | null | #!/usr/bin/env python
"""Lower-level functionality for parsing CMA (.cma) files.
This handles .chn (CHAIN) files as a collection of CMA blocks, i.e. alignments.
"""
import itertools
import logging
import re
from . import sugar
def parse(infile):
with sugar.maybe_open(infile) as instream:
for block in _parse_blocks(instream):
yield block
read = sugar.make_reader(parse)
# Removed
# parse_raw = parse
# --------------------------------------------------------------------
# Parse the cma into raw chunks
# Flow:
# if START:
# parse line 1
# parse line 2
# pass control to _parse_sequences
#
# _parse_sequences:
# read a non-blank line;
# if END: return (w/ all sequences accumulated)
# else:
# read 2 more non-blank lines
# parse the 3 read lines & bundle up data
# yield a seq
#
def _parse_blocks(instream):
"""Parse an alignment block from the given file handle.
Block looks like:
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
(209)***********************************************...
... sequences, numbered 1-8 ...
_0].
"""
ilines = sugar.unblank(instream)
for line in ilines:
if line.startswith('['):
# Start of block
level, one, name, seqcount, params = _parse_block_header(line)
qlen, qchars = _parse_block_postheader(next(ilines))
# Pass control to the sequence parser
sequences = list(_parse_sequences(ilines, qlen))
# Validation
if not len(sequences) == seqcount:
logging.warn("Expected %d sequences in block %s, found %d",
seqcount, name, len(sequences))
yield {'level': level,
'one': one,
'name': name,
# 'seqcount': seqcount,
'params': params,
'query_length': qlen,
'query_chars': qchars,
'sequences': sequences,
}
def _parse_sequences(ilines, expect_qlen):
"""Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
"""
while True:
first = next(ilines)
if first.startswith('_') and first.endswith('].'):
# End of sequences & end of block
break
# ENH: handle wrapped lines?
try:
index, this_len, query_len = _parse_seq_preheader(first)
except ValueError:
logging.warn('Unparseable line (SKIPPING):\n%s', first)
continue
(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
) = _parse_seq_header(next(ilines))
try:
headseq, molseq, tailseq = _parse_seq_body(next(ilines))
except ValueError:
logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
continue
# Validation
if expect_qlen != query_len:
logging.warn("Query length in %s given as %d; expected %d",
rec_id, query_len, expect_qlen)
if not headseq and not headlen:
headlen = 0
if not tailseq and not taillen:
taillen = 0
if headseq:
if headlen is None:
headlen = len(headseq)
elif headlen != len(headseq):
logging.warn("Conflicting head flank lengths in %s: %d, %d",
rec_id, headlen, len(headseq))
if tailseq:
if taillen is None:
taillen = len(tailseq)
elif taillen != len(tailseq):
logging.warn("Conflicting tail flank lengths in %s: %d, %d",
rec_id, taillen, len(tailseq))
yield {'index': index,
'id': rec_id,
'description': description,
'dbxrefs': dbxrefs,
'phylum': phylum,
'taxchar': taxchar,
'head_len': headlen,
'tail_len': taillen,
'head_seq': headseq,
'tail_seq': tailseq,
'length': this_len,
'seq': molseq,
}
# Microparsing
def _parse_block_header(line):
"""
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
"""
level = line[1]
one, _rest = line[4:].split(')=', 1)
name, _rest = _rest.split('(', 1)
seqcount, _rest = _rest.split(')', 1)
params = _rest.strip('{}:')
# try:
# params = dict((key, float(val))
# for key, val in (pair.split('=')
# for pair in _rest[1:-2].split(',')))
# except ValueError:
# # Couldn't convert params to key-val pairs, for whatever reason
# logging.warn("Failed to parse CMA params: %s", _rest[1:-2])
# params = {}
return int(level), int(one), name, int(seqcount), params
def _parse_block_postheader(line):
"""
(209)**************!*****************!!*************...
"""
parts = line[1:].split(')', 1)
qlen = int(parts[0])
if not len(parts[1]) == qlen:
logging.warn("postheader expected %d-long query, found %d",
qlen, len(parts[1]))
return qlen, parts[1]
def _parse_seq_header(line):
"""Unique ID, head/tail lengths and taxonomy info from a sequence header.
The description is the part of the FASTA/CMA sequence header starting after
the first space (i.e. excluding ID), to the end of the line.
This function looks inside the first '{...}' pair to extract info.
Ex:
>consensus seq
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
>gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C
"""
# ENH: use the two functions in esbglib.parseutils
# or, move one or both of those functions into here
_parts = line[1:].split(None, 1)
rec_id = _parts[0]
descr = _parts[1] if _parts[1:] else ''
# Database cross references
dbxrefs = {}
if '|' in rec_id:
id_gen = iter(rec_id.rstrip('|').split('|'))
for key in id_gen:
try:
dbxrefs[key] = next(id_gen)
except StopIteration:
break
# Head/tail lengths and taxonomy codes
headlen = taillen = None
phylum = taxchar = ''
if descr.startswith('{'):
_deets, description = descr[1:].split('}', 1)
match = re.search(r"""
(?:
\| (?P<headlen> \d+)
\( (?P<taillen> \d+)
\)
\|
)?
(?:
< (?P<phylum> .+?)
\( (?P<taxchar> \w)
\)
>
)?
""", _deets, re.VERBOSE)
if match:
headlen, taillen, phylum, taxchar = match.groups()
if headlen is not None:
headlen = int(headlen)
if taillen is not None:
taillen = int(taillen)
if phylum is None:
phylum = ''
if taxchar is None:
taxchar = ''
else:
logging.warn("Couldn't match head/tail: %s", _deets)
else:
description = descr
# TODO - return a dictionary here, update it in _parse_sequences
return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
def _parse_seq_body(line):
"""
Ex:
{()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
Or:
{(HY)ELPWVEKYR...
The sequence fragments in parentheses represent N- or C-terminal flanking
regions that are not part of the alignment block (I think). Most tools don't
include them, but some do, apparently.
"""
line = line.rstrip('*')
if '{()' in line:
head, _rest = line.split('{()', 1)
else:
# Match parens
_rest = line.split('{(', 1)[1]
head, _rest = _rest.split(')', 1)
if '()}' in _rest:
molseq, tail = _rest.split('()}', 1)
else:
# Match parens
molseq, _rest = _rest.split('(', 1)
tail = _rest.split(')}', 1)[0]
return (head, molseq, tail)
# --------------------------------------------------------------------
# Write
def write(blocks, outfile):
if isinstance(blocks, dict) and 'sequences' in blocks:
blocks = [blocks]
with sugar.maybe_open(outfile, 'w+') as outstream:
outstream.writelines(
itertools.chain(*map(_format_block, blocks)))
def _format_block(block):
# Block header
# [0_(1)=structs.seq(12){go=19,gx=2,pn=5.0,lf=0,rf=0}:
# (69)*****!***************************!!!******!******************!*******
yield """\
[{level}_({one})={name}({seqcount}){{{params}}}:
({query_length}){query_chars}
""".format(
seqcount=len(block['sequences']),
# fixed_params=','.join('{0}={1}'.format(key, val)
# for key, val in block['params'].iteritems()),
**block)
# Sequences
for s in _format_sequences(block['sequences'], block['query_length']):
yield s
# Block close
yield "_%s].\n" % block['level']
def _format_sequences(sequences, query_length):
# $1=254(255):
# >2QG5|A {|27(6)|}
# {()YTLENTIGRGSWGEVKIAVQKGTRIRRAAKKIPKYFV---EDVDRFKQEIEIMKSLDHPNIIRLYETFEDNTDIYLVMELCTGGELFERVVHKRVFRESDAARIMKDVLSAVAYCHKLNVAHRDLKPENFLFltdSPDSPLKLIDFGLAARFkpGKMMRTKVGTPYYVSPQVLEGL-YGPECDEWSAGVMMYVLLCGYPPFSAPTDXEVMLKIREGTFtfpeKDWLNVSPQAESLIRRLLTKSPKQRIT-----SLQALEHEW-()}*
for idx, seq in enumerate(sequences):
head_tail = ("|{head_len}({tail_len})|".format(**seq)
if seq['head_len'] or seq['tail_len'] else "")
taxonomy=("<{phylum}({taxchar})>".format(**seq)
if seq['phylum'] and seq['taxchar'] else "")
special_header = ("{{{0}{1}}}".format(head_tail, taxonomy)
if head_tail or taxonomy else "")
seq['index'] = idx + 1
yield """\
${index}={length}({0}):
>{id} {1}{description}
{head_seq}{{(){seq}()}}{tail_seq}*
""".format(query_length, special_header, **seq)
# --------------------------------------------------------------------
# Utilities
def realign_seqs(block, gap_char='.', align_indels=False):
"""Add gaps to a block so all residues in a column are equivalent.
Given a block, containing a list of "sequences" (dicts) each containing a
"seq" (actual string sequence, where upper=match, lower=insert, dash=gap),
insert gaps (- or .) into the sequences s.t.
1. columns line up properly, and
2. all resulting sequences have the same length
The reason this needs to be done is that the query/consensus sequence is not
assigned gaps to account for inserts in the other sequences. We need to add
the gaps back to obtain a normal alignment.
`return`: a list of realigned sequence strings.
"""
# ENH: align inserts using an external tool (if align_indels)
all_chars = [list(sq['seq']) for sq in block['sequences']]
# NB: If speed is an issue here, consider Numpy or Cython
# main problem: list.insert is O(n) -- would OrderedDict help?
nrows = len(all_chars)
i = 0
while i < len(all_chars[0]):
rows_need_gaps = [r for r in all_chars if not r[i].islower()]
if len(rows_need_gaps) != nrows:
for row in rows_need_gaps:
row.insert(i, gap_char)
i += 1
return [''.join(row) for row in all_chars]
def consensus2block(record, level=0, name=None):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Ungapping is handled here.
"""
cons_ungap = str(record.seq).replace('-', '').replace('.', '').upper()
record.seq = cons_ungap
return dict(
level=level, #record.annotations.get('level', 0),
one=1,
name=name or record.id,
params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
query_length=len(cons_ungap),
query_chars='*'*len(cons_ungap),
sequences=[seqrecord2sequence(record, len(cons_ungap), 1)]
)
def seqrecord2sequence(record, qlen, index):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Indels (gaps, casing) must have already been handled in the record.
"""
# aligned_len = sum(map(str.isupper, str(record.seq)))
# assert qlen == aligned_len, (
# "Aligned sequence length %s, query %s\n%s"
# % (aligned_len, qlen, str(record)))
description = (record.description.split(' ', 1)[1]
if ' ' in record.description
else ' ')
return dict(index=index,
id=record.id,
description=description,
dbxrefs={},
phylum='',
taxchar='',
head_len=None,
tail_len=None,
head_seq='',
tail_seq='',
length=len(record.seq) - record.seq.count('-'),
seq=str(record.seq),
)
def replace_asterisks(seq, label=None):
if '*' in seq:
logging.warn("Sequence %scontains '*' character; replacing with 'X'",
str(label) + ' ' if label else '')
return str(seq).replace('*', 'X')
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
"""Opposite of realign_seqs.
Input sequences should all be the same length.
The first record must be the consensus.
"""
level = 0
name = seqrecords[0].id
# If this is a CMA alignment, extract additional info:
if hasattr(seqrecords, '_records'):
if hasattr(seqrecords, 'level'):
level = seqrecords.level
if hasattr(seqrecords, 'name'):
name = seqrecords.name
seqrecords = seqrecords._records
consensus = seqrecords.pop(0)
cons_length = len(consensus)
for i, s in enumerate(seqrecords):
if len(s) != cons_length:
raise ValueError(
"Sequence #%d has length %d, consensus is %d"
% (i+2, len(s), cons_length))
if '.' in str(consensus.seq):
# Strict -- error if there's a '-'
if '-' in str(consensus.seq):
if strict:
raise ValueError("Consensus contains '-' gap characters")
logging.warn("Consensus sequence contains both '.' and '-' gap "
"characters -- is it really the consensus?")
aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
else:
aligned_cols = [c != '.' for c in str(consensus.seq)]
else:
# A little more ambiguous...
aligned_cols = [c != '-' for c in str(consensus.seq)]
consensus.seq = replace_asterisks(consensus.seq, 'consensus')
# Start a block with the consensus sequence
block = consensus2block(consensus, level=level, name=name)
qlen = block['query_length']
# Collapse & add remaining sequences to the block
for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords):
# Collapse rec.seq down to aligned size
new_mol_seq = []
is_beginning = True
for aligned_col, char in zip(aligned_cols,
replace_asterisks(rec.seq, index)):
if aligned_col:
is_beginning = False
if char in '-.':
# deletion
new_mol_seq.append('-')
else:
# aligned character
new_mol_seq.append(char.upper())
else:
# it's an insert or nothing
# (also, skip any left-side inserts)
if char not in '-.' and not is_beginning:
new_mol_seq.append(char.lower())
rec.seq = ''.join(new_mol_seq)
if do_iron:
rec.seq = iron(rec.seq)
block['sequences'].append(seqrecord2sequence(rec, qlen, index))
return block
def iron(sequence):
"""'Iron out' indel regions in the aligned sequence.
Any inserts next to deletions are converted to matches (uppercase).
Given a CMA string like:
AAAAbc--de-f--gAAA
Result:
AAAABCDEFgAAA
"""
r_indel = re.compile(r'(-[a-y]|[a-y]-)')
orig_sequence = sequence
while r_indel.search(sequence):
in_insert = False
in_gap = False
seen_gaps = 0
inserts = []
outchars = []
for char in sequence:
if in_insert:
if char.islower():
# Extend the insert
inserts.append(char)
elif char.isupper():
# Indel is over; 'iron' out & emit inserts, then gaps
in_insert = False
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Convert a preceding indel char to a 'match' (uppercase)
# If the indel and gap are both multiple chars, this will
# capitalize the insert left-to-right, then leave any gap
# remainer as-is.
assert char == '-'
if not inserts:
in_insert = False
in_gap = True
seen_gaps += 1
else:
outchars.append(inserts.pop(0).upper())
# NB: Only leave the insert region if we've finished
# converting all the insert chars
if not inserts:
in_insert = False
in_gap = True
elif in_gap:
if char.islower():
in_insert = True
in_gap = False
# If some inserts previously seen, emit them now
# If no inserts have been seen yet, we'll iron this indel
if inserts:
outchars.extend(inserts)
outchars.append('-' * seen_gaps)
seen_gaps = 0
inserts = [char]
elif char.isupper():
in_gap = False
# End of the gap -- emit
if inserts:
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Extend the gap
assert char == '-'
seen_gaps += 1
else:
assert not inserts and not seen_gaps, (
"Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
% (inserts, seen_gaps, sequence, in_insert, in_gap))
# Coming from Match state
if char.isupper():
# Extend the match
outchars.append(char)
elif char.islower():
inserts.append(char)
in_insert = True
else:
assert char == '-'
seen_gaps += 1
in_gap = True
# Emit any trailing indel
if inserts:
outchars.extend(inserts)
if seen_gaps:
outchars.append('-' * seen_gaps)
sequence = ''.join(outchars)
# logging.info(sequence)
assert (sequence.replace('-', '').upper()
==
orig_sequence.replace('-', '').upper()), \
'\nOrig: ' + orig_sequence + \
'\nIron: ' + sequence
return sequence
# --------------------------------------------------------------------
if __name__ == '__main__':
# Test
import sys
from .utils import get_equivalent_positions
cmafiles = sys.argv[1:]
if not cmafiles:
sys.exit("usage: cma.py <cmafile1> [<cmafile2> ...]")
for fname in cmafiles:
block = next(parse(fname))
print block['query_length'], "aa query"
print len(block['sequences']), "sequences"
print "of lengths:",
for seq in block['sequences']:
print "%d/%d(%d)" % (
len(seq['seq']), seq['length'], seq['seq'].count('-')),
print
print " Equivalencies:"
for idx in xrange(1, 20):
print idx, '=', get_equivalent_positions(block)[idx]
print
print " Writing the file back out:"
print
if len(block['sequences']) < 60:
write(block, sys.stdout)
|
etal/biocma | biocma/cma.py | _parse_seq_header | python | def _parse_seq_header(line):
# ENH: use the two functions in esbglib.parseutils
# or, move one or both of those functions into here
_parts = line[1:].split(None, 1)
rec_id = _parts[0]
descr = _parts[1] if _parts[1:] else ''
# Database cross references
dbxrefs = {}
if '|' in rec_id:
id_gen = iter(rec_id.rstrip('|').split('|'))
for key in id_gen:
try:
dbxrefs[key] = next(id_gen)
except StopIteration:
break
# Head/tail lengths and taxonomy codes
headlen = taillen = None
phylum = taxchar = ''
if descr.startswith('{'):
_deets, description = descr[1:].split('}', 1)
match = re.search(r"""
(?:
\| (?P<headlen> \d+)
\( (?P<taillen> \d+)
\)
\|
)?
(?:
< (?P<phylum> .+?)
\( (?P<taxchar> \w)
\)
>
)?
""", _deets, re.VERBOSE)
if match:
headlen, taillen, phylum, taxchar = match.groups()
if headlen is not None:
headlen = int(headlen)
if taillen is not None:
taillen = int(taillen)
if phylum is None:
phylum = ''
if taxchar is None:
taxchar = ''
else:
logging.warn("Couldn't match head/tail: %s", _deets)
else:
description = descr
# TODO - return a dictionary here, update it in _parse_sequences
return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description | Unique ID, head/tail lengths and taxonomy info from a sequence header.
The description is the part of the FASTA/CMA sequence header starting after
the first space (i.e. excluding ID), to the end of the line.
This function looks inside the first '{...}' pair to extract info.
Ex:
>consensus seq
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
>gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L189-L254 | null | #!/usr/bin/env python
"""Lower-level functionality for parsing CMA (.cma) files.
This handles .chn (CHAIN) files as a collection of CMA blocks, i.e. alignments.
"""
import itertools
import logging
import re
from . import sugar
def parse(infile):
    """Iterate over the alignment blocks found in a CMA file.

    `infile` may be a path or an open handle (whatever sugar.maybe_open
    accepts); each parsed block is yielded as a dict.
    """
    with sugar.maybe_open(infile) as handle:
        # Delegate the real work; just relay each parsed block.
        for parsed_block in _parse_blocks(handle):
            yield parsed_block
# Convenience: `read(infile)` parses the file eagerly (see sugar.make_reader),
# whereas `parse` yields blocks lazily.
read = sugar.make_reader(parse)
# Removed
# parse_raw = parse
# --------------------------------------------------------------------
# Parse the cma into raw chunks
# Flow:
# if START:
# parse line 1
# parse line 2
# pass control to _parse_sequences
#
# _parse_sequences:
# read a non-blank line;
# if END: return (w/ all sequences accumulated)
# else:
# read 2 more non-blank lines
# parse the 3 read lines & bundle up data
# yield a seq
#
def _parse_blocks(instream):
    """Yield each alignment block in `instream` as a dict.

    A block looks like:

        [0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
        (209)***********************************************...
        ... sequences, numbered 1-8 ...
        _0].
    """
    ilines = sugar.unblank(instream)
    for line in ilines:
        if not line.startswith('['):
            continue
        # Block header, followed by the '(len)****...' query line.
        level, one, name, seqcount, params = _parse_block_header(line)
        qlen, qchars = _parse_block_postheader(next(ilines))
        # The sequence parser consumes lines up to the closing '_N].'
        sequences = list(_parse_sequences(ilines, qlen))
        # Sanity check against the count declared in the header.
        if len(sequences) != seqcount:
            logging.warn("Expected %d sequences in block %s, found %d",
                         seqcount, name, len(sequences))
        yield {'level': level,
               'one': one,
               'name': name,
               'params': params,
               'query_length': qlen,
               'query_chars': qchars,
               'sequences': sequences,
               }
def _parse_sequences(ilines, expect_qlen):
    """Parse the sequences in the current block, yielding one dict each.

    Consumes three lines per sequence from `ilines` (pre-header, header,
    body) until the block-closing '_N].' line is reached. `expect_qlen`
    is the query length declared by the block header, used only for
    validation warnings.

    A sequence looks like:

        $3=227(209):
        >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5]
        {()YVPFARKYRPKFF...ASVYGE()}*
    """
    while True:
        first = next(ilines)
        if first.startswith('_') and first.endswith('].'):
            # End of sequences & end of block
            break
        # ENH: handle wrapped lines?
        try:
            index, this_len, query_len = _parse_seq_preheader(first)
        except ValueError:
            logging.warn('Unparseable line (SKIPPING):\n%s', first)
            continue
        (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
        ) = _parse_seq_header(next(ilines))
        try:
            headseq, molseq, tailseq = _parse_seq_body(next(ilines))
        except ValueError:
            logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
            continue
        # Validation: declared query length vs. the block header's.
        if expect_qlen != query_len:
            logging.warn("Query length in %s given as %d; expected %d",
                         rec_id, query_len, expect_qlen)
        # Normalize missing flank lengths to 0 when no flank sequence
        # is present either; otherwise reconcile declared vs. actual.
        if not headseq and not headlen:
            headlen = 0
        if not tailseq and not taillen:
            taillen = 0
        if headseq:
            if headlen is None:
                headlen = len(headseq)
            elif headlen != len(headseq):
                logging.warn("Conflicting head flank lengths in %s: %d, %d",
                             rec_id, headlen, len(headseq))
        if tailseq:
            if taillen is None:
                taillen = len(tailseq)
            elif taillen != len(tailseq):
                logging.warn("Conflicting tail flank lengths in %s: %d, %d",
                             rec_id, taillen, len(tailseq))
        yield {'index': index,
               'id': rec_id,
               'description': description,
               'dbxrefs': dbxrefs,
               'phylum': phylum,
               'taxchar': taxchar,
               'head_len': headlen,
               'tail_len': taillen,
               'head_seq': headseq,
               'tail_seq': tailseq,
               'length': this_len,
               'seq': molseq,
               }
# Microparsing
def _parse_block_header(line):
"""
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
"""
level = line[1]
one, _rest = line[4:].split(')=', 1)
name, _rest = _rest.split('(', 1)
seqcount, _rest = _rest.split(')', 1)
params = _rest.strip('{}:')
# try:
# params = dict((key, float(val))
# for key, val in (pair.split('=')
# for pair in _rest[1:-2].split(',')))
# except ValueError:
# # Couldn't convert params to key-val pairs, for whatever reason
# logging.warn("Failed to parse CMA params: %s", _rest[1:-2])
# params = {}
return int(level), int(one), name, int(seqcount), params
def _parse_block_postheader(line):
"""
(209)**************!*****************!!*************...
"""
parts = line[1:].split(')', 1)
qlen = int(parts[0])
if not len(parts[1]) == qlen:
logging.warn("postheader expected %d-long query, found %d",
qlen, len(parts[1]))
return qlen, parts[1]
def _parse_seq_preheader(line):
"""
$3=227(209):
"""
match = re.match(r"\$ (\d+) = (\d+) \( (\d+) \):", line, re.VERBOSE)
if not match:
raise ValueError("Unparseable header: " + line)
index, this_len, query_len = match.groups()
return map(int, (index, this_len, query_len))
def _parse_seq_body(line):
"""
Ex:
{()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
Or:
{(HY)ELPWVEKYR...
The sequence fragments in parentheses represent N- or C-terminal flanking
regions that are not part of the alignment block (I think). Most tools don't
include them, but some do, apparently.
"""
line = line.rstrip('*')
if '{()' in line:
head, _rest = line.split('{()', 1)
else:
# Match parens
_rest = line.split('{(', 1)[1]
head, _rest = _rest.split(')', 1)
if '()}' in _rest:
molseq, tail = _rest.split('()}', 1)
else:
# Match parens
molseq, _rest = _rest.split('(', 1)
tail = _rest.split(')}', 1)[0]
return (head, molseq, tail)
# --------------------------------------------------------------------
# Write
def write(blocks, outfile):
    """Write one block, or an iterable of blocks, to a CMA file/stream."""
    if isinstance(blocks, dict) and 'sequences' in blocks:
        # A single block was passed directly; wrap it for uniformity.
        blocks = [blocks]
    with sugar.maybe_open(outfile, 'w+') as outstream:
        outstream.writelines(
            itertools.chain.from_iterable(
                _format_block(blk) for blk in blocks))
def _format_block(block):
    """Yield the text chunks making up one serialized CMA block.

    Emits the header, e.g.:
        [0_(1)=structs.seq(12){go=19,gx=2,pn=5.0,lf=0,rf=0}:
        (69)*****!***************************!!!******!******************!*******
    then each formatted sequence, then the '_N].' closer.
    """
    # NB: seqcount is recomputed from the sequence list rather than taken
    # from the parsed block, so edits to the list stay consistent.
    yield """\
[{level}_({one})={name}({seqcount}){{{params}}}:
({query_length}){query_chars}
""".format(
        seqcount=len(block['sequences']),
        **block)
    # Sequences
    for s in _format_sequences(block['sequences'], block['query_length']):
        yield s
    # Block close
    yield "_%s].\n" % block['level']
def _format_sequences(sequences, query_length):
    """Yield each sequence dict serialized in CMA form, e.g.:

        $1=254(255):
        >2QG5|A {|27(6)|}
        {()YTLE...EHEW-()}*
    """
    for idx, seq in enumerate(sequences):
        # Optional '{|head(tail)|<Phylum(T)>}' annotations before the
        # description; emitted only when the data is present.
        head_tail = ("|{head_len}({tail_len})|".format(**seq)
                     if seq['head_len'] or seq['tail_len'] else "")
        taxonomy = ("<{phylum}({taxchar})>".format(**seq)
                    if seq['phylum'] and seq['taxchar'] else "")
        special_header = ("{{{0}{1}}}".format(head_tail, taxonomy)
                         if head_tail or taxonomy else "")
        # NOTE: mutates the input dict to renumber sequences 1..N.
        seq['index'] = idx + 1
        yield """\
${index}={length}({0}):
>{id} {1}{description}
{head_seq}{{(){seq}()}}{tail_seq}*
""".format(query_length, special_header, **seq)
# --------------------------------------------------------------------
# Utilities
def realign_seqs(block, gap_char='.', align_indels=False):
    """Insert gaps so all residues in a column are equivalent.

    In a CMA block the query/consensus row carries no gaps for the other
    rows' inserts (lowercase characters), so the raw strings do not line
    up column-by-column. Wherever any row has an insert at a column,
    every row *without* an insert there gets a `gap_char` inserted.

    `align_indels` is accepted for API compatibility but currently
    unused (inserts are not aligned against each other).

    Returns the realigned sequence strings; `block` is not modified.
    """
    # ENH: align inserts using an external tool (if align_indels)
    rows = [list(entry['seq']) for entry in block['sequences']]
    # NB: If speed is an issue here, consider Numpy or Cython --
    # list.insert is O(n) per call.
    total = len(rows)
    col = 0
    while col < len(rows[0]):
        # Rows lacking a lowercase (insert) residue in this column.
        uninserted = [row for row in rows if not row[col].islower()]
        if len(uninserted) != total:
            # At least one row has an insert here: pad everyone else.
            for row in uninserted:
                row.insert(col, gap_char)
        col += 1
    return [''.join(row) for row in rows]
def consensus2block(record, level=0, name=None):
    """Build a one-sequence CMA block from a consensus SeqRecord.

    The record's sequence is ungapped ('-' and '.' removed) and
    uppercased -- note this mutates `record.seq` in place -- then used
    as both the query and the block's single sequence entry.
    """
    bare = str(record.seq).replace('-', '').replace('.', '').upper()
    record.seq = bare
    return dict(
        level=level,
        one=1,
        name=name or record.id,
        # Default CHAIN parameters; not derived from the input.
        params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
        query_length=len(bare),
        query_chars='*' * len(bare),
        sequences=[seqrecord2sequence(record, len(bare), 1)],
    )
def seqrecord2sequence(record, qlen, index):
    """Convert a Biopython-style SeqRecord into a CMA sequence dict.

    Indels (gaps, casing) must already be encoded in `record.seq`.
    `qlen` is accepted for signature compatibility but not validated
    here (the length check is left commented out upstream).
    """
    # Everything after the first space of the description; fall back to
    # a single space so the CMA field is never empty.
    if ' ' in record.description:
        description = record.description.split(' ', 1)[1]
    else:
        description = ' '
    return dict(index=index,
                id=record.id,
                description=description,
                dbxrefs={},
                phylum='',
                taxchar='',
                head_len=None,
                tail_len=None,
                head_seq='',
                tail_seq='',
                # Residue count: total length minus gap characters.
                length=len(record.seq) - record.seq.count('-'),
                seq=str(record.seq),
                )
def replace_asterisks(seq, label=None):
    """Return `seq` as a string with '*' characters swapped for 'X'.

    Logs a warning when any '*' is found; `label` (e.g. a sequence index
    or name) identifies the offending sequence in the message.
    """
    text = str(seq)
    if '*' in text:
        logging.warn("Sequence %scontains '*' character; replacing with 'X'",
                     str(label) + ' ' if label else '')
    return text.replace('*', 'X')
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
    """Opposite of realign_seqs: build a CMA block from aligned records.

    Input sequences must all be the same length; the first record must
    be the consensus. Columns where the consensus has a residue become
    match/delete columns (uppercase or '-'); the rest become inserts
    (lowercase). With `do_iron`, adjacent insert/delete runs are merged
    via iron(). Mutates the records (pops the consensus, rewrites seqs).

    If `seqrecords` is a CMA alignment object (has `_records`), its
    `level`/`name` attributes are carried over when present.
    """
    level = 0
    name = seqrecords[0].id
    # If this is a CMA alignment, extract additional info:
    if hasattr(seqrecords, '_records'):
        if hasattr(seqrecords, 'level'):
            level = seqrecords.level
        if hasattr(seqrecords, 'name'):
            name = seqrecords.name
        seqrecords = seqrecords._records
    consensus = seqrecords.pop(0)
    cons_length = len(consensus)
    for i, s in enumerate(seqrecords):
        if len(s) != cons_length:
            raise ValueError(
                "Sequence #%d has length %d, consensus is %d"
                % (i+2, len(s), cons_length))
    # Decide which consensus characters count as gaps: '.' when present,
    # otherwise '-'; both when the consensus (suspiciously) mixes them.
    if '.' in str(consensus.seq):
        # Strict -- error if there's a '-'
        if '-' in str(consensus.seq):
            if strict:
                raise ValueError("Consensus contains '-' gap characters")
            logging.warn("Consensus sequence contains both '.' and '-' gap "
                         "characters -- is it really the consensus?")
            aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
        else:
            aligned_cols = [c != '.' for c in str(consensus.seq)]
    else:
        # A little more ambiguous...
        aligned_cols = [c != '-' for c in str(consensus.seq)]
    consensus.seq = replace_asterisks(consensus.seq, 'consensus')
    # Start a block with the consensus sequence
    block = consensus2block(consensus, level=level, name=name)
    qlen = block['query_length']
    # Collapse & add remaining sequences to the block
    # (CMA sequence indices start at 2; index 1 is the consensus.)
    for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords):
        # Collapse rec.seq down to aligned size
        new_mol_seq = []
        is_beginning = True
        for aligned_col, char in zip(aligned_cols,
                                     replace_asterisks(rec.seq, index)):
            if aligned_col:
                is_beginning = False
                if char in '-.':
                    # deletion
                    new_mol_seq.append('-')
                else:
                    # aligned character
                    new_mol_seq.append(char.upper())
            else:
                # it's an insert or nothing
                # (also, skip any left-side inserts)
                if char not in '-.' and not is_beginning:
                    new_mol_seq.append(char.lower())
        rec.seq = ''.join(new_mol_seq)
        if do_iron:
            rec.seq = iron(rec.seq)
        block['sequences'].append(seqrecord2sequence(rec, qlen, index))
    return block
def iron(sequence):
    """'Iron out' indel regions in the aligned sequence.

    Any inserts (lowercase) adjacent to deletions ('-') are converted to
    matches (uppercase), repeatedly, until no insert borders a gap.
    Given a CMA string like:
        AAAAbc--de-f--gAAA
    Result:
        AAAABCDEFgAAA

    Implemented as a small state machine over the characters (states:
    match / in_insert / in_gap), re-run until no '-x'/'x-' pair remains.
    """
    r_indel = re.compile(r'(-[a-y]|[a-y]-)')
    orig_sequence = sequence
    while r_indel.search(sequence):
        in_insert = False
        in_gap = False
        seen_gaps = 0    # count of pending, not-yet-emitted gap chars
        inserts = []     # pending, not-yet-emitted insert chars
        outchars = []
        for char in sequence:
            if in_insert:
                if char.islower():
                    # Extend the insert
                    inserts.append(char)
                elif char.isupper():
                    # Indel is over; 'iron' out & emit inserts, then gaps
                    in_insert = False
                    outchars.extend(inserts)
                    inserts = []
                    outchars.append('-' * seen_gaps)
                    seen_gaps = 0
                    outchars.append(char)
                else:
                    # Convert a preceding indel char to a 'match' (uppercase)
                    # If the indel and gap are both multiple chars, this will
                    # capitalize the insert left-to-right, then leave any gap
                    # remainer as-is.
                    assert char == '-'
                    if not inserts:
                        in_insert = False
                        in_gap = True
                        seen_gaps += 1
                    else:
                        outchars.append(inserts.pop(0).upper())
                        # NB: Only leave the insert region if we've finished
                        # converting all the insert chars
                        if not inserts:
                            in_insert = False
                            in_gap = True
            elif in_gap:
                if char.islower():
                    in_insert = True
                    in_gap = False
                    # If some inserts previously seen, emit them now
                    # If no inserts have been seen yet, we'll iron this indel
                    if inserts:
                        outchars.extend(inserts)
                        outchars.append('-' * seen_gaps)
                        seen_gaps = 0
                    inserts = [char]
                elif char.isupper():
                    in_gap = False
                    # End of the gap -- emit
                    if inserts:
                        outchars.extend(inserts)
                        inserts = []
                    outchars.append('-' * seen_gaps)
                    seen_gaps = 0
                    outchars.append(char)
                else:
                    # Extend the gap
                    assert char == '-'
                    seen_gaps += 1
            else:
                assert not inserts and not seen_gaps, (
                    "Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
                    % (inserts, seen_gaps, sequence, in_insert, in_gap))
                # Coming from Match state
                if char.isupper():
                    # Extend the match
                    outchars.append(char)
                elif char.islower():
                    inserts.append(char)
                    in_insert = True
                else:
                    assert char == '-'
                    seen_gaps += 1
                    in_gap = True
        # Emit any trailing indel
        if inserts:
            outchars.extend(inserts)
        if seen_gaps:
            outchars.append('-' * seen_gaps)
        sequence = ''.join(outchars)
        # logging.info(sequence)
        # Invariant: ironing never changes the residue content, only
        # casing and gap placement.
        assert (sequence.replace('-', '').upper()
                ==
                orig_sequence.replace('-', '').upper()), \
            '\nOrig: ' + orig_sequence + \
            '\nIron: ' + sequence
    return sequence
# --------------------------------------------------------------------
if __name__ == '__main__':
    # Test / demo driver: parse each CMA file given on the command line,
    # report basic statistics, show position equivalencies for the first
    # residues, and round-trip small alignments back to stdout.
    # (Python 2 print-statement syntax, consistent with the module.)
    import sys
    from .utils import get_equivalent_positions
    cmafiles = sys.argv[1:]
    if not cmafiles:
        sys.exit("usage: cma.py <cmafile1> [<cmafile2> ...]")
    for fname in cmafiles:
        block = next(parse(fname))
        print block['query_length'], "aa query"
        print len(block['sequences']), "sequences"
        print "of lengths:",
        for seq in block['sequences']:
            print "%d/%d(%d)" % (
                len(seq['seq']), seq['length'], seq['seq'].count('-')),
        print
        print " Equivalencies:"
        for idx in xrange(1, 20):
            print idx, '=', get_equivalent_positions(block)[idx]
        print
        print " Writing the file back out:"
        print
        # Avoid dumping huge alignments to the terminal.
        if len(block['sequences']) < 60:
            write(block, sys.stdout)
|
etal/biocma | biocma/cma.py | _parse_seq_body | python | def _parse_seq_body(line):
line = line.rstrip('*')
if '{()' in line:
head, _rest = line.split('{()', 1)
else:
# Match parens
_rest = line.split('{(', 1)[1]
head, _rest = _rest.split(')', 1)
if '()}' in _rest:
molseq, tail = _rest.split('()}', 1)
else:
# Match parens
molseq, _rest = _rest.split('(', 1)
tail = _rest.split(')}', 1)[0]
return (head, molseq, tail) | Ex:
{()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
Or:
{(HY)ELPWVEKYR...
The sequence fragments in parentheses represent N- or C-terminal flanking
regions that are not part of the alignment block (I think). Most tools don't
include them, but some do, apparently. | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L257-L285 | null | #!/usr/bin/env python
"""Lower-level functionality for parsing CMA (.cma) files.
This handles .chn (CHAIN) files as a collection of CMA blocks, i.e. alignments.
"""
import itertools
import logging
import re
from . import sugar
def parse(infile):
with sugar.maybe_open(infile) as instream:
for block in _parse_blocks(instream):
yield block
read = sugar.make_reader(parse)
# Removed
# parse_raw = parse
# --------------------------------------------------------------------
# Parse the cma into raw chunks
# Flow:
# if START:
# parse line 1
# parse line 2
# pass control to _parse_sequences
#
# _parse_sequences:
# read a non-blank line;
# if END: return (w/ all sequences accumulated)
# else:
# read 2 more non-blank lines
# parse the 3 read lines & bundle up data
# yield a seq
#
def _parse_blocks(instream):
"""Parse an alignment block from the given file handle.
Block looks like:
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
(209)***********************************************...
... sequences, numbered 1-8 ...
_0].
"""
ilines = sugar.unblank(instream)
for line in ilines:
if line.startswith('['):
# Start of block
level, one, name, seqcount, params = _parse_block_header(line)
qlen, qchars = _parse_block_postheader(next(ilines))
# Pass control to the sequence parser
sequences = list(_parse_sequences(ilines, qlen))
# Validation
if not len(sequences) == seqcount:
logging.warn("Expected %d sequences in block %s, found %d",
seqcount, name, len(sequences))
yield {'level': level,
'one': one,
'name': name,
# 'seqcount': seqcount,
'params': params,
'query_length': qlen,
'query_chars': qchars,
'sequences': sequences,
}
def _parse_sequences(ilines, expect_qlen):
"""Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
"""
while True:
first = next(ilines)
if first.startswith('_') and first.endswith('].'):
# End of sequences & end of block
break
# ENH: handle wrapped lines?
try:
index, this_len, query_len = _parse_seq_preheader(first)
except ValueError:
logging.warn('Unparseable line (SKIPPING):\n%s', first)
continue
(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
) = _parse_seq_header(next(ilines))
try:
headseq, molseq, tailseq = _parse_seq_body(next(ilines))
except ValueError:
logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
continue
# Validation
if expect_qlen != query_len:
logging.warn("Query length in %s given as %d; expected %d",
rec_id, query_len, expect_qlen)
if not headseq and not headlen:
headlen = 0
if not tailseq and not taillen:
taillen = 0
if headseq:
if headlen is None:
headlen = len(headseq)
elif headlen != len(headseq):
logging.warn("Conflicting head flank lengths in %s: %d, %d",
rec_id, headlen, len(headseq))
if tailseq:
if taillen is None:
taillen = len(tailseq)
elif taillen != len(tailseq):
logging.warn("Conflicting tail flank lengths in %s: %d, %d",
rec_id, taillen, len(tailseq))
yield {'index': index,
'id': rec_id,
'description': description,
'dbxrefs': dbxrefs,
'phylum': phylum,
'taxchar': taxchar,
'head_len': headlen,
'tail_len': taillen,
'head_seq': headseq,
'tail_seq': tailseq,
'length': this_len,
'seq': molseq,
}
# Microparsing
def _parse_block_header(line):
"""
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
"""
level = line[1]
one, _rest = line[4:].split(')=', 1)
name, _rest = _rest.split('(', 1)
seqcount, _rest = _rest.split(')', 1)
params = _rest.strip('{}:')
# try:
# params = dict((key, float(val))
# for key, val in (pair.split('=')
# for pair in _rest[1:-2].split(',')))
# except ValueError:
# # Couldn't convert params to key-val pairs, for whatever reason
# logging.warn("Failed to parse CMA params: %s", _rest[1:-2])
# params = {}
return int(level), int(one), name, int(seqcount), params
def _parse_block_postheader(line):
"""
(209)**************!*****************!!*************...
"""
parts = line[1:].split(')', 1)
qlen = int(parts[0])
if not len(parts[1]) == qlen:
logging.warn("postheader expected %d-long query, found %d",
qlen, len(parts[1]))
return qlen, parts[1]
def _parse_seq_preheader(line):
"""
$3=227(209):
"""
match = re.match(r"\$ (\d+) = (\d+) \( (\d+) \):", line, re.VERBOSE)
if not match:
raise ValueError("Unparseable header: " + line)
index, this_len, query_len = match.groups()
return map(int, (index, this_len, query_len))
def _parse_seq_header(line):
"""Unique ID, head/tail lengths and taxonomy info from a sequence header.
The description is the part of the FASTA/CMA sequence header starting after
the first space (i.e. excluding ID), to the end of the line.
This function looks inside the first '{...}' pair to extract info.
Ex:
>consensus seq
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
>gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C
"""
# ENH: use the two functions in esbglib.parseutils
# or, move one or both of those functions into here
_parts = line[1:].split(None, 1)
rec_id = _parts[0]
descr = _parts[1] if _parts[1:] else ''
# Database cross references
dbxrefs = {}
if '|' in rec_id:
id_gen = iter(rec_id.rstrip('|').split('|'))
for key in id_gen:
try:
dbxrefs[key] = next(id_gen)
except StopIteration:
break
# Head/tail lengths and taxonomy codes
headlen = taillen = None
phylum = taxchar = ''
if descr.startswith('{'):
_deets, description = descr[1:].split('}', 1)
match = re.search(r"""
(?:
\| (?P<headlen> \d+)
\( (?P<taillen> \d+)
\)
\|
)?
(?:
< (?P<phylum> .+?)
\( (?P<taxchar> \w)
\)
>
)?
""", _deets, re.VERBOSE)
if match:
headlen, taillen, phylum, taxchar = match.groups()
if headlen is not None:
headlen = int(headlen)
if taillen is not None:
taillen = int(taillen)
if phylum is None:
phylum = ''
if taxchar is None:
taxchar = ''
else:
logging.warn("Couldn't match head/tail: %s", _deets)
else:
description = descr
# TODO - return a dictionary here, update it in _parse_sequences
return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
# --------------------------------------------------------------------
# Write
def write(blocks, outfile):
if isinstance(blocks, dict) and 'sequences' in blocks:
blocks = [blocks]
with sugar.maybe_open(outfile, 'w+') as outstream:
outstream.writelines(
itertools.chain(*map(_format_block, blocks)))
def _format_block(block):
# Block header
# [0_(1)=structs.seq(12){go=19,gx=2,pn=5.0,lf=0,rf=0}:
# (69)*****!***************************!!!******!******************!*******
yield """\
[{level}_({one})={name}({seqcount}){{{params}}}:
({query_length}){query_chars}
""".format(
seqcount=len(block['sequences']),
# fixed_params=','.join('{0}={1}'.format(key, val)
# for key, val in block['params'].iteritems()),
**block)
# Sequences
for s in _format_sequences(block['sequences'], block['query_length']):
yield s
# Block close
yield "_%s].\n" % block['level']
def _format_sequences(sequences, query_length):
# $1=254(255):
# >2QG5|A {|27(6)|}
# {()YTLENTIGRGSWGEVKIAVQKGTRIRRAAKKIPKYFV---EDVDRFKQEIEIMKSLDHPNIIRLYETFEDNTDIYLVMELCTGGELFERVVHKRVFRESDAARIMKDVLSAVAYCHKLNVAHRDLKPENFLFltdSPDSPLKLIDFGLAARFkpGKMMRTKVGTPYYVSPQVLEGL-YGPECDEWSAGVMMYVLLCGYPPFSAPTDXEVMLKIREGTFtfpeKDWLNVSPQAESLIRRLLTKSPKQRIT-----SLQALEHEW-()}*
for idx, seq in enumerate(sequences):
head_tail = ("|{head_len}({tail_len})|".format(**seq)
if seq['head_len'] or seq['tail_len'] else "")
taxonomy=("<{phylum}({taxchar})>".format(**seq)
if seq['phylum'] and seq['taxchar'] else "")
special_header = ("{{{0}{1}}}".format(head_tail, taxonomy)
if head_tail or taxonomy else "")
seq['index'] = idx + 1
yield """\
${index}={length}({0}):
>{id} {1}{description}
{head_seq}{{(){seq}()}}{tail_seq}*
""".format(query_length, special_header, **seq)
# --------------------------------------------------------------------
# Utilities
def realign_seqs(block, gap_char='.', align_indels=False):
"""Add gaps to a block so all residues in a column are equivalent.
Given a block, containing a list of "sequences" (dicts) each containing a
"seq" (actual string sequence, where upper=match, lower=insert, dash=gap),
insert gaps (- or .) into the sequences s.t.
1. columns line up properly, and
2. all resulting sequences have the same length
The reason this needs to be done is that the query/consensus sequence is not
assigned gaps to account for inserts in the other sequences. We need to add
the gaps back to obtain a normal alignment.
`return`: a list of realigned sequence strings.
"""
# ENH: align inserts using an external tool (if align_indels)
all_chars = [list(sq['seq']) for sq in block['sequences']]
# NB: If speed is an issue here, consider Numpy or Cython
# main problem: list.insert is O(n) -- would OrderedDict help?
nrows = len(all_chars)
i = 0
while i < len(all_chars[0]):
rows_need_gaps = [r for r in all_chars if not r[i].islower()]
if len(rows_need_gaps) != nrows:
for row in rows_need_gaps:
row.insert(i, gap_char)
i += 1
return [''.join(row) for row in all_chars]
def consensus2block(record, level=0, name=None):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Ungapping is handled here.
"""
cons_ungap = str(record.seq).replace('-', '').replace('.', '').upper()
record.seq = cons_ungap
return dict(
level=level, #record.annotations.get('level', 0),
one=1,
name=name or record.id,
params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
query_length=len(cons_ungap),
query_chars='*'*len(cons_ungap),
sequences=[seqrecord2sequence(record, len(cons_ungap), 1)]
)
def seqrecord2sequence(record, qlen, index):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Indels (gaps, casing) must have already been handled in the record.
"""
# aligned_len = sum(map(str.isupper, str(record.seq)))
# assert qlen == aligned_len, (
# "Aligned sequence length %s, query %s\n%s"
# % (aligned_len, qlen, str(record)))
description = (record.description.split(' ', 1)[1]
if ' ' in record.description
else ' ')
return dict(index=index,
id=record.id,
description=description,
dbxrefs={},
phylum='',
taxchar='',
head_len=None,
tail_len=None,
head_seq='',
tail_seq='',
length=len(record.seq) - record.seq.count('-'),
seq=str(record.seq),
)
def replace_asterisks(seq, label=None):
if '*' in seq:
logging.warn("Sequence %scontains '*' character; replacing with 'X'",
str(label) + ' ' if label else '')
return str(seq).replace('*', 'X')
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
"""Opposite of realign_seqs.
Input sequences should all be the same length.
The first record must be the consensus.
"""
level = 0
name = seqrecords[0].id
# If this is a CMA alignment, extract additional info:
if hasattr(seqrecords, '_records'):
if hasattr(seqrecords, 'level'):
level = seqrecords.level
if hasattr(seqrecords, 'name'):
name = seqrecords.name
seqrecords = seqrecords._records
consensus = seqrecords.pop(0)
cons_length = len(consensus)
for i, s in enumerate(seqrecords):
if len(s) != cons_length:
raise ValueError(
"Sequence #%d has length %d, consensus is %d"
% (i+2, len(s), cons_length))
if '.' in str(consensus.seq):
# Strict -- error if there's a '-'
if '-' in str(consensus.seq):
if strict:
raise ValueError("Consensus contains '-' gap characters")
logging.warn("Consensus sequence contains both '.' and '-' gap "
"characters -- is it really the consensus?")
aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
else:
aligned_cols = [c != '.' for c in str(consensus.seq)]
else:
# A little more ambiguous...
aligned_cols = [c != '-' for c in str(consensus.seq)]
consensus.seq = replace_asterisks(consensus.seq, 'consensus')
# Start a block with the consensus sequence
block = consensus2block(consensus, level=level, name=name)
qlen = block['query_length']
# Collapse & add remaining sequences to the block
for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords):
# Collapse rec.seq down to aligned size
new_mol_seq = []
is_beginning = True
for aligned_col, char in zip(aligned_cols,
replace_asterisks(rec.seq, index)):
if aligned_col:
is_beginning = False
if char in '-.':
# deletion
new_mol_seq.append('-')
else:
# aligned character
new_mol_seq.append(char.upper())
else:
# it's an insert or nothing
# (also, skip any left-side inserts)
if char not in '-.' and not is_beginning:
new_mol_seq.append(char.lower())
rec.seq = ''.join(new_mol_seq)
if do_iron:
rec.seq = iron(rec.seq)
block['sequences'].append(seqrecord2sequence(rec, qlen, index))
return block
def iron(sequence):
"""'Iron out' indel regions in the aligned sequence.
Any inserts next to deletions are converted to matches (uppercase).
Given a CMA string like:
AAAAbc--de-f--gAAA
Result:
AAAABCDEFgAAA
"""
r_indel = re.compile(r'(-[a-y]|[a-y]-)')
orig_sequence = sequence
while r_indel.search(sequence):
in_insert = False
in_gap = False
seen_gaps = 0
inserts = []
outchars = []
for char in sequence:
if in_insert:
if char.islower():
# Extend the insert
inserts.append(char)
elif char.isupper():
# Indel is over; 'iron' out & emit inserts, then gaps
in_insert = False
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Convert a preceding indel char to a 'match' (uppercase)
# If the indel and gap are both multiple chars, this will
# capitalize the insert left-to-right, then leave any gap
# remainder as-is.
assert char == '-'
if not inserts:
in_insert = False
in_gap = True
seen_gaps += 1
else:
outchars.append(inserts.pop(0).upper())
# NB: Only leave the insert region if we've finished
# converting all the insert chars
if not inserts:
in_insert = False
in_gap = True
elif in_gap:
if char.islower():
in_insert = True
in_gap = False
# If some inserts previously seen, emit them now
# If no inserts have been seen yet, we'll iron this indel
if inserts:
outchars.extend(inserts)
outchars.append('-' * seen_gaps)
seen_gaps = 0
inserts = [char]
elif char.isupper():
in_gap = False
# End of the gap -- emit
if inserts:
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Extend the gap
assert char == '-'
seen_gaps += 1
else:
assert not inserts and not seen_gaps, (
"Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
% (inserts, seen_gaps, sequence, in_insert, in_gap))
# Coming from Match state
if char.isupper():
# Extend the match
outchars.append(char)
elif char.islower():
inserts.append(char)
in_insert = True
else:
assert char == '-'
seen_gaps += 1
in_gap = True
# Emit any trailing indel
if inserts:
outchars.extend(inserts)
if seen_gaps:
outchars.append('-' * seen_gaps)
sequence = ''.join(outchars)
# logging.info(sequence)
assert (sequence.replace('-', '').upper()
==
orig_sequence.replace('-', '').upper()), \
'\nOrig: ' + orig_sequence + \
'\nIron: ' + sequence
return sequence
# --------------------------------------------------------------------
if __name__ == '__main__':
# Test
import sys
from .utils import get_equivalent_positions
cmafiles = sys.argv[1:]
if not cmafiles:
sys.exit("usage: cma.py <cmafile1> [<cmafile2> ...]")
for fname in cmafiles:
block = next(parse(fname))
print block['query_length'], "aa query"
print len(block['sequences']), "sequences"
print "of lengths:",
for seq in block['sequences']:
print "%d/%d(%d)" % (
len(seq['seq']), seq['length'], seq['seq'].count('-')),
print
print " Equivalencies:"
for idx in xrange(1, 20):
print idx, '=', get_equivalent_positions(block)[idx]
print
print " Writing the file back out:"
print
if len(block['sequences']) < 60:
write(block, sys.stdout)
|
etal/biocma | biocma/cma.py | realign_seqs | python | def realign_seqs(block, gap_char='.', align_indels=False):
# ENH: align inserts using an external tool (if align_indels)
all_chars = [list(sq['seq']) for sq in block['sequences']]
# NB: If speed is an issue here, consider Numpy or Cython
# main problem: list.insert is O(n) -- would OrderedDict help?
nrows = len(all_chars)
i = 0
while i < len(all_chars[0]):
rows_need_gaps = [r for r in all_chars if not r[i].islower()]
if len(rows_need_gaps) != nrows:
for row in rows_need_gaps:
row.insert(i, gap_char)
i += 1
return [''.join(row) for row in all_chars] | Add gaps to a block so all residues in a column are equivalent.
Given a block, containing a list of "sequences" (dicts) each containing a
"seq" (actual string sequence, where upper=match, lower=insert, dash=gap),
insert gaps (- or .) into the sequences s.t.
1. columns line up properly, and
2. all resulting sequences have the same length
The reason this needs to be done is that the query/consensus sequence is not
assigned gaps to account for inserts in the other sequences. We need to add
the gaps back to obtain a normal alignment.
`return`: a list of realigned sequence strings. | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L342-L371 | null | #!/usr/bin/env python
"""Lower-level functionality for parsing CMA (.cma) files.
This handles .chn (CHAIN) files as a collection of CMA blocks, i.e. alignments.
"""
import itertools
import logging
import re
from . import sugar
def parse(infile):
with sugar.maybe_open(infile) as instream:
for block in _parse_blocks(instream):
yield block
read = sugar.make_reader(parse)
# Removed
# parse_raw = parse
# --------------------------------------------------------------------
# Parse the cma into raw chunks
# Flow:
# if START:
# parse line 1
# parse line 2
# pass control to _parse_sequences
#
# _parse_sequences:
# read a non-blank line;
# if END: return (w/ all sequences accumulated)
# else:
# read 2 more non-blank lines
# parse the 3 read lines & bundle up data
# yield a seq
#
def _parse_blocks(instream):
"""Parse an alignment block from the given file handle.
Block looks like:
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
(209)***********************************************...
... sequences, numbered 1-8 ...
_0].
"""
ilines = sugar.unblank(instream)
for line in ilines:
if line.startswith('['):
# Start of block
level, one, name, seqcount, params = _parse_block_header(line)
qlen, qchars = _parse_block_postheader(next(ilines))
# Pass control to the sequence parser
sequences = list(_parse_sequences(ilines, qlen))
# Validation
if not len(sequences) == seqcount:
logging.warn("Expected %d sequences in block %s, found %d",
seqcount, name, len(sequences))
yield {'level': level,
'one': one,
'name': name,
# 'seqcount': seqcount,
'params': params,
'query_length': qlen,
'query_chars': qchars,
'sequences': sequences,
}
def _parse_sequences(ilines, expect_qlen):
"""Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
"""
while True:
first = next(ilines)
if first.startswith('_') and first.endswith('].'):
# End of sequences & end of block
break
# ENH: handle wrapped lines?
try:
index, this_len, query_len = _parse_seq_preheader(first)
except ValueError:
logging.warn('Unparseable line (SKIPPING):\n%s', first)
continue
(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
) = _parse_seq_header(next(ilines))
try:
headseq, molseq, tailseq = _parse_seq_body(next(ilines))
except ValueError:
logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
continue
# Validation
if expect_qlen != query_len:
logging.warn("Query length in %s given as %d; expected %d",
rec_id, query_len, expect_qlen)
if not headseq and not headlen:
headlen = 0
if not tailseq and not taillen:
taillen = 0
if headseq:
if headlen is None:
headlen = len(headseq)
elif headlen != len(headseq):
logging.warn("Conflicting head flank lengths in %s: %d, %d",
rec_id, headlen, len(headseq))
if tailseq:
if taillen is None:
taillen = len(tailseq)
elif taillen != len(tailseq):
logging.warn("Conflicting tail flank lengths in %s: %d, %d",
rec_id, taillen, len(tailseq))
yield {'index': index,
'id': rec_id,
'description': description,
'dbxrefs': dbxrefs,
'phylum': phylum,
'taxchar': taxchar,
'head_len': headlen,
'tail_len': taillen,
'head_seq': headseq,
'tail_seq': tailseq,
'length': this_len,
'seq': molseq,
}
# Microparsing
def _parse_block_header(line):
"""
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
"""
level = line[1]
one, _rest = line[4:].split(')=', 1)
name, _rest = _rest.split('(', 1)
seqcount, _rest = _rest.split(')', 1)
params = _rest.strip('{}:')
# try:
# params = dict((key, float(val))
# for key, val in (pair.split('=')
# for pair in _rest[1:-2].split(',')))
# except ValueError:
# # Couldn't convert params to key-val pairs, for whatever reason
# logging.warn("Failed to parse CMA params: %s", _rest[1:-2])
# params = {}
return int(level), int(one), name, int(seqcount), params
def _parse_block_postheader(line):
"""
(209)**************!*****************!!*************...
"""
parts = line[1:].split(')', 1)
qlen = int(parts[0])
if not len(parts[1]) == qlen:
logging.warn("postheader expected %d-long query, found %d",
qlen, len(parts[1]))
return qlen, parts[1]
def _parse_seq_preheader(line):
"""
$3=227(209):
"""
match = re.match(r"\$ (\d+) = (\d+) \( (\d+) \):", line, re.VERBOSE)
if not match:
raise ValueError("Unparseable header: " + line)
index, this_len, query_len = match.groups()
return map(int, (index, this_len, query_len))
def _parse_seq_header(line):
"""Unique ID, head/tail lengths and taxonomy info from a sequence header.
The description is the part of the FASTA/CMA sequence header starting after
the first space (i.e. excluding ID), to the end of the line.
This function looks inside the first '{...}' pair to extract info.
Ex:
>consensus seq
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
>gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C
"""
# ENH: use the two functions in esbglib.parseutils
# or, move one or both of those functions into here
_parts = line[1:].split(None, 1)
rec_id = _parts[0]
descr = _parts[1] if _parts[1:] else ''
# Database cross references
dbxrefs = {}
if '|' in rec_id:
id_gen = iter(rec_id.rstrip('|').split('|'))
for key in id_gen:
try:
dbxrefs[key] = next(id_gen)
except StopIteration:
break
# Head/tail lengths and taxonomy codes
headlen = taillen = None
phylum = taxchar = ''
if descr.startswith('{'):
_deets, description = descr[1:].split('}', 1)
match = re.search(r"""
(?:
\| (?P<headlen> \d+)
\( (?P<taillen> \d+)
\)
\|
)?
(?:
< (?P<phylum> .+?)
\( (?P<taxchar> \w)
\)
>
)?
""", _deets, re.VERBOSE)
if match:
headlen, taillen, phylum, taxchar = match.groups()
if headlen is not None:
headlen = int(headlen)
if taillen is not None:
taillen = int(taillen)
if phylum is None:
phylum = ''
if taxchar is None:
taxchar = ''
else:
logging.warn("Couldn't match head/tail: %s", _deets)
else:
description = descr
# TODO - return a dictionary here, update it in _parse_sequences
return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
def _parse_seq_body(line):
"""
Ex:
{()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
Or:
{(HY)ELPWVEKYR...
The sequence fragments in parentheses represent N- or C-terminal flanking
regions that are not part of the alignment block (I think). Most tools don't
include them, but some do, apparently.
"""
line = line.rstrip('*')
if '{()' in line:
head, _rest = line.split('{()', 1)
else:
# Match parens
_rest = line.split('{(', 1)[1]
head, _rest = _rest.split(')', 1)
if '()}' in _rest:
molseq, tail = _rest.split('()}', 1)
else:
# Match parens
molseq, _rest = _rest.split('(', 1)
tail = _rest.split(')}', 1)[0]
return (head, molseq, tail)
# --------------------------------------------------------------------
# Write
def write(blocks, outfile):
if isinstance(blocks, dict) and 'sequences' in blocks:
blocks = [blocks]
with sugar.maybe_open(outfile, 'w+') as outstream:
outstream.writelines(
itertools.chain(*map(_format_block, blocks)))
def _format_block(block):
# Block header
# [0_(1)=structs.seq(12){go=19,gx=2,pn=5.0,lf=0,rf=0}:
# (69)*****!***************************!!!******!******************!*******
yield """\
[{level}_({one})={name}({seqcount}){{{params}}}:
({query_length}){query_chars}
""".format(
seqcount=len(block['sequences']),
# fixed_params=','.join('{0}={1}'.format(key, val)
# for key, val in block['params'].iteritems()),
**block)
# Sequences
for s in _format_sequences(block['sequences'], block['query_length']):
yield s
# Block close
yield "_%s].\n" % block['level']
def _format_sequences(sequences, query_length):
# $1=254(255):
# >2QG5|A {|27(6)|}
# {()YTLENTIGRGSWGEVKIAVQKGTRIRRAAKKIPKYFV---EDVDRFKQEIEIMKSLDHPNIIRLYETFEDNTDIYLVMELCTGGELFERVVHKRVFRESDAARIMKDVLSAVAYCHKLNVAHRDLKPENFLFltdSPDSPLKLIDFGLAARFkpGKMMRTKVGTPYYVSPQVLEGL-YGPECDEWSAGVMMYVLLCGYPPFSAPTDXEVMLKIREGTFtfpeKDWLNVSPQAESLIRRLLTKSPKQRIT-----SLQALEHEW-()}*
for idx, seq in enumerate(sequences):
head_tail = ("|{head_len}({tail_len})|".format(**seq)
if seq['head_len'] or seq['tail_len'] else "")
taxonomy=("<{phylum}({taxchar})>".format(**seq)
if seq['phylum'] and seq['taxchar'] else "")
special_header = ("{{{0}{1}}}".format(head_tail, taxonomy)
if head_tail or taxonomy else "")
seq['index'] = idx + 1
yield """\
${index}={length}({0}):
>{id} {1}{description}
{head_seq}{{(){seq}()}}{tail_seq}*
""".format(query_length, special_header, **seq)
# --------------------------------------------------------------------
# Utilities
def consensus2block(record, level=0, name=None):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Ungapping is handled here.
"""
cons_ungap = str(record.seq).replace('-', '').replace('.', '').upper()
record.seq = cons_ungap
return dict(
level=level, #record.annotations.get('level', 0),
one=1,
name=name or record.id,
params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
query_length=len(cons_ungap),
query_chars='*'*len(cons_ungap),
sequences=[seqrecord2sequence(record, len(cons_ungap), 1)]
)
def seqrecord2sequence(record, qlen, index):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Indels (gaps, casing) must have already been handled in the record.
"""
# aligned_len = sum(map(str.isupper, str(record.seq)))
# assert qlen == aligned_len, (
# "Aligned sequence length %s, query %s\n%s"
# % (aligned_len, qlen, str(record)))
description = (record.description.split(' ', 1)[1]
if ' ' in record.description
else ' ')
return dict(index=index,
id=record.id,
description=description,
dbxrefs={},
phylum='',
taxchar='',
head_len=None,
tail_len=None,
head_seq='',
tail_seq='',
length=len(record.seq) - record.seq.count('-'),
seq=str(record.seq),
)
def replace_asterisks(seq, label=None):
if '*' in seq:
logging.warn("Sequence %scontains '*' character; replacing with 'X'",
str(label) + ' ' if label else '')
return str(seq).replace('*', 'X')
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
"""Opposite of realign_seqs.
Input sequences should all be the same length.
The first record must be the consensus.
"""
level = 0
name = seqrecords[0].id
# If this is a CMA alignment, extract additional info:
if hasattr(seqrecords, '_records'):
if hasattr(seqrecords, 'level'):
level = seqrecords.level
if hasattr(seqrecords, 'name'):
name = seqrecords.name
seqrecords = seqrecords._records
consensus = seqrecords.pop(0)
cons_length = len(consensus)
for i, s in enumerate(seqrecords):
if len(s) != cons_length:
raise ValueError(
"Sequence #%d has length %d, consensus is %d"
% (i+2, len(s), cons_length))
if '.' in str(consensus.seq):
# Strict -- error if there's a '-'
if '-' in str(consensus.seq):
if strict:
raise ValueError("Consensus contains '-' gap characters")
logging.warn("Consensus sequence contains both '.' and '-' gap "
"characters -- is it really the consensus?")
aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
else:
aligned_cols = [c != '.' for c in str(consensus.seq)]
else:
# A little more ambiguous...
aligned_cols = [c != '-' for c in str(consensus.seq)]
consensus.seq = replace_asterisks(consensus.seq, 'consensus')
# Start a block with the consensus sequence
block = consensus2block(consensus, level=level, name=name)
qlen = block['query_length']
# Collapse & add remaining sequences to the block
for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords):
# Collapse rec.seq down to aligned size
new_mol_seq = []
is_beginning = True
for aligned_col, char in zip(aligned_cols,
replace_asterisks(rec.seq, index)):
if aligned_col:
is_beginning = False
if char in '-.':
# deletion
new_mol_seq.append('-')
else:
# aligned character
new_mol_seq.append(char.upper())
else:
# it's an insert or nothing
# (also, skip any left-side inserts)
if char not in '-.' and not is_beginning:
new_mol_seq.append(char.lower())
rec.seq = ''.join(new_mol_seq)
if do_iron:
rec.seq = iron(rec.seq)
block['sequences'].append(seqrecord2sequence(rec, qlen, index))
return block
def iron(sequence):
"""'Iron out' indel regions in the aligned sequence.
Any inserts next to deletions are converted to matches (uppercase).
Given a CMA string like:
AAAAbc--de-f--gAAA
Result:
AAAABCDEFgAAA
"""
r_indel = re.compile(r'(-[a-y]|[a-y]-)')
orig_sequence = sequence
while r_indel.search(sequence):
in_insert = False
in_gap = False
seen_gaps = 0
inserts = []
outchars = []
for char in sequence:
if in_insert:
if char.islower():
# Extend the insert
inserts.append(char)
elif char.isupper():
# Indel is over; 'iron' out & emit inserts, then gaps
in_insert = False
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Convert a preceding indel char to a 'match' (uppercase)
# If the indel and gap are both multiple chars, this will
# capitalize the insert left-to-right, then leave any gap
# remainder as-is.
assert char == '-'
if not inserts:
in_insert = False
in_gap = True
seen_gaps += 1
else:
outchars.append(inserts.pop(0).upper())
# NB: Only leave the insert region if we've finished
# converting all the insert chars
if not inserts:
in_insert = False
in_gap = True
elif in_gap:
if char.islower():
in_insert = True
in_gap = False
# If some inserts previously seen, emit them now
# If no inserts have been seen yet, we'll iron this indel
if inserts:
outchars.extend(inserts)
outchars.append('-' * seen_gaps)
seen_gaps = 0
inserts = [char]
elif char.isupper():
in_gap = False
# End of the gap -- emit
if inserts:
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Extend the gap
assert char == '-'
seen_gaps += 1
else:
assert not inserts and not seen_gaps, (
"Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
% (inserts, seen_gaps, sequence, in_insert, in_gap))
# Coming from Match state
if char.isupper():
# Extend the match
outchars.append(char)
elif char.islower():
inserts.append(char)
in_insert = True
else:
assert char == '-'
seen_gaps += 1
in_gap = True
# Emit any trailing indel
if inserts:
outchars.extend(inserts)
if seen_gaps:
outchars.append('-' * seen_gaps)
sequence = ''.join(outchars)
# logging.info(sequence)
assert (sequence.replace('-', '').upper()
==
orig_sequence.replace('-', '').upper()), \
'\nOrig: ' + orig_sequence + \
'\nIron: ' + sequence
return sequence
# --------------------------------------------------------------------
if __name__ == '__main__':
# Test
import sys
from .utils import get_equivalent_positions
cmafiles = sys.argv[1:]
if not cmafiles:
sys.exit("usage: cma.py <cmafile1> [<cmafile2> ...]")
for fname in cmafiles:
block = next(parse(fname))
print block['query_length'], "aa query"
print len(block['sequences']), "sequences"
print "of lengths:",
for seq in block['sequences']:
print "%d/%d(%d)" % (
len(seq['seq']), seq['length'], seq['seq'].count('-')),
print
print " Equivalencies:"
for idx in xrange(1, 20):
print idx, '=', get_equivalent_positions(block)[idx]
print
print " Writing the file back out:"
print
if len(block['sequences']) < 60:
write(block, sys.stdout)
|
etal/biocma | biocma/cma.py | consensus2block | python | def consensus2block(record, level=0, name=None):
cons_ungap = str(record.seq).replace('-', '').replace('.', '').upper()
record.seq = cons_ungap
return dict(
level=level, #record.annotations.get('level', 0),
one=1,
name=name or record.id,
params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
query_length=len(cons_ungap),
query_chars='*'*len(cons_ungap),
sequences=[seqrecord2sequence(record, len(cons_ungap), 1)]
) | Convert a Biopython SeqRecord to a esbglib.cma block.
Ungapping is handled here. | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L374-L389 | [
"def seqrecord2sequence(record, qlen, index):\n \"\"\"Convert a Biopython SeqRecord to a esbglib.cma block.\n\n Indels (gaps, casing) must have already been handled in the record.\n \"\"\"\n # aligned_len = sum(map(str.isupper, str(record.seq)))\n # assert qlen == aligned_len, (\n # \"Aligned sequence length %s, query %s\\n%s\"\n # % (aligned_len, qlen, str(record)))\n\n description = (record.description.split(' ', 1)[1]\n if ' ' in record.description\n else ' ')\n return dict(index=index,\n id=record.id,\n description=description,\n dbxrefs={},\n phylum='',\n taxchar='',\n head_len=None,\n tail_len=None,\n head_seq='',\n tail_seq='',\n length=len(record.seq) - record.seq.count('-'),\n seq=str(record.seq),\n )\n"
] | #!/usr/bin/env python
"""Lower-level functionality for parsing CMA (.cma) files.
This handles .chn (CHAIN) files as a collection of CMA blocks, i.e. alignments.
"""
import itertools
import logging
import re
from . import sugar
def parse(infile):
with sugar.maybe_open(infile) as instream:
for block in _parse_blocks(instream):
yield block
read = sugar.make_reader(parse)
# Removed
# parse_raw = parse
# --------------------------------------------------------------------
# Parse the cma into raw chunks
# Flow:
# if START:
# parse line 1
# parse line 2
# pass control to _parse_sequences
#
# _parse_sequences:
# read a non-blank line;
# if END: return (w/ all sequences accumulated)
# else:
# read 2 more non-blank lines
# parse the 3 read lines & bundle up data
# yield a seq
#
def _parse_blocks(instream):
"""Parse an alignment block from the given file handle.
Block looks like:
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
(209)***********************************************...
... sequences, numbered 1-8 ...
_0].
"""
ilines = sugar.unblank(instream)
for line in ilines:
if line.startswith('['):
# Start of block
level, one, name, seqcount, params = _parse_block_header(line)
qlen, qchars = _parse_block_postheader(next(ilines))
# Pass control to the sequence parser
sequences = list(_parse_sequences(ilines, qlen))
# Validation
if not len(sequences) == seqcount:
logging.warn("Expected %d sequences in block %s, found %d",
seqcount, name, len(sequences))
yield {'level': level,
'one': one,
'name': name,
# 'seqcount': seqcount,
'params': params,
'query_length': qlen,
'query_chars': qchars,
'sequences': sequences,
}
def _parse_sequences(ilines, expect_qlen):
"""Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
"""
while True:
first = next(ilines)
if first.startswith('_') and first.endswith('].'):
# End of sequences & end of block
break
# ENH: handle wrapped lines?
try:
index, this_len, query_len = _parse_seq_preheader(first)
except ValueError:
logging.warn('Unparseable line (SKIPPING):\n%s', first)
continue
(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
) = _parse_seq_header(next(ilines))
try:
headseq, molseq, tailseq = _parse_seq_body(next(ilines))
except ValueError:
logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
continue
# Validation
if expect_qlen != query_len:
logging.warn("Query length in %s given as %d; expected %d",
rec_id, query_len, expect_qlen)
if not headseq and not headlen:
headlen = 0
if not tailseq and not taillen:
taillen = 0
if headseq:
if headlen is None:
headlen = len(headseq)
elif headlen != len(headseq):
logging.warn("Conflicting head flank lengths in %s: %d, %d",
rec_id, headlen, len(headseq))
if tailseq:
if taillen is None:
taillen = len(tailseq)
elif taillen != len(tailseq):
logging.warn("Conflicting tail flank lengths in %s: %d, %d",
rec_id, taillen, len(tailseq))
yield {'index': index,
'id': rec_id,
'description': description,
'dbxrefs': dbxrefs,
'phylum': phylum,
'taxchar': taxchar,
'head_len': headlen,
'tail_len': taillen,
'head_seq': headseq,
'tail_seq': tailseq,
'length': this_len,
'seq': molseq,
}
# Microparsing
def _parse_block_header(line):
"""
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
"""
level = line[1]
one, _rest = line[4:].split(')=', 1)
name, _rest = _rest.split('(', 1)
seqcount, _rest = _rest.split(')', 1)
params = _rest.strip('{}:')
# try:
# params = dict((key, float(val))
# for key, val in (pair.split('=')
# for pair in _rest[1:-2].split(',')))
# except ValueError:
# # Couldn't convert params to key-val pairs, for whatever reason
# logging.warn("Failed to parse CMA params: %s", _rest[1:-2])
# params = {}
return int(level), int(one), name, int(seqcount), params
def _parse_block_postheader(line):
"""
(209)**************!*****************!!*************...
"""
parts = line[1:].split(')', 1)
qlen = int(parts[0])
if not len(parts[1]) == qlen:
logging.warn("postheader expected %d-long query, found %d",
qlen, len(parts[1]))
return qlen, parts[1]
def _parse_seq_preheader(line):
"""
$3=227(209):
"""
match = re.match(r"\$ (\d+) = (\d+) \( (\d+) \):", line, re.VERBOSE)
if not match:
raise ValueError("Unparseable header: " + line)
index, this_len, query_len = match.groups()
return map(int, (index, this_len, query_len))
def _parse_seq_header(line):
"""Unique ID, head/tail lengths and taxonomy info from a sequence header.
The description is the part of the FASTA/CMA sequence header starting after
the first space (i.e. excluding ID), to the end of the line.
This function looks inside the first '{...}' pair to extract info.
Ex:
>consensus seq
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
>gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C
"""
# ENH: use the two functions in esbglib.parseutils
# or, move one or both of those functions into here
_parts = line[1:].split(None, 1)
rec_id = _parts[0]
descr = _parts[1] if _parts[1:] else ''
# Database cross references
dbxrefs = {}
if '|' in rec_id:
id_gen = iter(rec_id.rstrip('|').split('|'))
for key in id_gen:
try:
dbxrefs[key] = next(id_gen)
except StopIteration:
break
# Head/tail lengths and taxonomy codes
headlen = taillen = None
phylum = taxchar = ''
if descr.startswith('{'):
_deets, description = descr[1:].split('}', 1)
match = re.search(r"""
(?:
\| (?P<headlen> \d+)
\( (?P<taillen> \d+)
\)
\|
)?
(?:
< (?P<phylum> .+?)
\( (?P<taxchar> \w)
\)
>
)?
""", _deets, re.VERBOSE)
if match:
headlen, taillen, phylum, taxchar = match.groups()
if headlen is not None:
headlen = int(headlen)
if taillen is not None:
taillen = int(taillen)
if phylum is None:
phylum = ''
if taxchar is None:
taxchar = ''
else:
logging.warn("Couldn't match head/tail: %s", _deets)
else:
description = descr
# TODO - return a dictionary here, update it in _parse_sequences
return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
def _parse_seq_body(line):
"""
Ex:
{()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
Or:
{(HY)ELPWVEKYR...
The sequence fragments in parentheses represent N- or C-terminal flanking
regions that are not part of the alignment block (I think). Most tools don't
include them, but some do, apparently.
"""
line = line.rstrip('*')
if '{()' in line:
head, _rest = line.split('{()', 1)
else:
# Match parens
_rest = line.split('{(', 1)[1]
head, _rest = _rest.split(')', 1)
if '()}' in _rest:
molseq, tail = _rest.split('()}', 1)
else:
# Match parens
molseq, _rest = _rest.split('(', 1)
tail = _rest.split(')}', 1)[0]
return (head, molseq, tail)
# --------------------------------------------------------------------
# Write
def write(blocks, outfile):
if isinstance(blocks, dict) and 'sequences' in blocks:
blocks = [blocks]
with sugar.maybe_open(outfile, 'w+') as outstream:
outstream.writelines(
itertools.chain(*map(_format_block, blocks)))
def _format_block(block):
# Block header
# [0_(1)=structs.seq(12){go=19,gx=2,pn=5.0,lf=0,rf=0}:
# (69)*****!***************************!!!******!******************!*******
yield """\
[{level}_({one})={name}({seqcount}){{{params}}}:
({query_length}){query_chars}
""".format(
seqcount=len(block['sequences']),
# fixed_params=','.join('{0}={1}'.format(key, val)
# for key, val in block['params'].iteritems()),
**block)
# Sequences
for s in _format_sequences(block['sequences'], block['query_length']):
yield s
# Block close
yield "_%s].\n" % block['level']
def _format_sequences(sequences, query_length):
# $1=254(255):
# >2QG5|A {|27(6)|}
# {()YTLENTIGRGSWGEVKIAVQKGTRIRRAAKKIPKYFV---EDVDRFKQEIEIMKSLDHPNIIRLYETFEDNTDIYLVMELCTGGELFERVVHKRVFRESDAARIMKDVLSAVAYCHKLNVAHRDLKPENFLFltdSPDSPLKLIDFGLAARFkpGKMMRTKVGTPYYVSPQVLEGL-YGPECDEWSAGVMMYVLLCGYPPFSAPTDXEVMLKIREGTFtfpeKDWLNVSPQAESLIRRLLTKSPKQRIT-----SLQALEHEW-()}*
for idx, seq in enumerate(sequences):
head_tail = ("|{head_len}({tail_len})|".format(**seq)
if seq['head_len'] or seq['tail_len'] else "")
taxonomy=("<{phylum}({taxchar})>".format(**seq)
if seq['phylum'] and seq['taxchar'] else "")
special_header = ("{{{0}{1}}}".format(head_tail, taxonomy)
if head_tail or taxonomy else "")
seq['index'] = idx + 1
yield """\
${index}={length}({0}):
>{id} {1}{description}
{head_seq}{{(){seq}()}}{tail_seq}*
""".format(query_length, special_header, **seq)
# --------------------------------------------------------------------
# Utilities
def realign_seqs(block, gap_char='.', align_indels=False):
"""Add gaps to a block so all residues in a column are equivalent.
Given a block, containing a list of "sequences" (dicts) each containing a
"seq" (actual string sequence, where upper=match, lower=insert, dash=gap),
insert gaps (- or .) into the sequences s.t.
1. columns line up properly, and
2. all resulting sequences have the same length
The reason this needs to be done is that the query/consensus sequence is not
assigned gaps to account for inserts in the other sequences. We need to add
the gaps back to obtain a normal alignment.
`return`: a list of realigned sequence strings.
"""
# ENH: align inserts using an external tool (if align_indels)
all_chars = [list(sq['seq']) for sq in block['sequences']]
# NB: If speed is an issue here, consider Numpy or Cython
# main problem: list.insert is O(n) -- would OrderedDict help?
nrows = len(all_chars)
i = 0
while i < len(all_chars[0]):
rows_need_gaps = [r for r in all_chars if not r[i].islower()]
if len(rows_need_gaps) != nrows:
for row in rows_need_gaps:
row.insert(i, gap_char)
i += 1
return [''.join(row) for row in all_chars]
def seqrecord2sequence(record, qlen, index):
    """Convert a Biopython SeqRecord to a esbglib.cma block.

    Indels (gaps, casing) must have already been handled in the record.

    `qlen` is currently unused here (it was only consumed by the
    commented-out length assertion in the original); it is kept so the
    call signature matches the other block-building helpers.
    """
    # aligned_len = sum(map(str.isupper, str(record.seq)))
    # assert qlen == aligned_len, (
    #     "Aligned sequence length %s, query %s\n%s"
    #     % (aligned_len, qlen, str(record)))
    if ' ' in record.description:
        description = record.description.split(' ', 1)[1]
    else:
        description = ' '
    return {
        'index': index,
        'id': record.id,
        'description': description,
        'dbxrefs': {},
        'phylum': '',
        'taxchar': '',
        'head_len': None,
        'tail_len': None,
        'head_seq': '',
        'tail_seq': '',
        'length': len(record.seq) - record.seq.count('-'),
        'seq': str(record.seq),
    }
def replace_asterisks(seq, label=None):
    """Return *seq* as a plain string with '*' characters replaced by 'X'.

    Emits a warning (prefixed with *label*, if given) when the input
    actually contains an asterisk.  The input may be a string or any
    object whose ``str()`` is the sequence; the result is always ``str``.
    """
    if '*' in seq:
        # logging.warn is a deprecated alias; logging.warning is the
        # documented API (and avoids a DeprecationWarning on Python 3).
        logging.warning("Sequence %scontains '*' character; replacing with 'X'",
                        str(label) + ' ' if label else '')
    return str(seq).replace('*', 'X')
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
    """Opposite of realign_seqs.

    Input sequences should all be the same length.
    The first record must be the consensus.

    `seqrecords`: list of SeqRecord-like objects, or an alignment object
        carrying a `_records` attribute (and optionally `level`/`name`).
    `strict`: raise ValueError if the consensus mixes '.' and '-' gaps.
    `do_iron`: run iron() on each collapsed sequence to normalize indels.

    Returns a block dict (see consensus2block) with each remaining record
    collapsed onto the consensus columns and appended to 'sequences'.

    Raises ValueError when a record's length differs from the consensus,
    or (with strict=True) on mixed gap characters in the consensus.

    NOTE(review): mutates its inputs -- pops the consensus off
    `seqrecords` and rewrites each record's `seq` attribute in place.
    """
    level = 0
    name = seqrecords[0].id
    # If this is a CMA alignment, extract additional info:
    if hasattr(seqrecords, '_records'):
        if hasattr(seqrecords, 'level'):
            level = seqrecords.level
        if hasattr(seqrecords, 'name'):
            name = seqrecords.name
        seqrecords = seqrecords._records
    consensus = seqrecords.pop(0)
    cons_length = len(consensus)
    for i, s in enumerate(seqrecords):
        if len(s) != cons_length:
            raise ValueError(
                "Sequence #%d has length %d, consensus is %d"
                % (i+2, len(s), cons_length))
    # Decide which consensus columns count as aligned (match) columns,
    # depending on which gap character(s) the consensus uses.
    if '.' in str(consensus.seq):
        # Strict -- error if there's a '-'
        if '-' in str(consensus.seq):
            if strict:
                raise ValueError("Consensus contains '-' gap characters")
            logging.warn("Consensus sequence contains both '.' and '-' gap "
                         "characters -- is it really the consensus?")
            aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
        else:
            aligned_cols = [c != '.' for c in str(consensus.seq)]
    else:
        # A little more ambiguous...
        aligned_cols = [c != '-' for c in str(consensus.seq)]
    consensus.seq = replace_asterisks(consensus.seq, 'consensus')
    # Start a block with the consensus sequence
    block = consensus2block(consensus, level=level, name=name)
    qlen = block['query_length']
    # Collapse & add remaining sequences to the block
    # (sequence indexing starts at 2 -- the consensus is sequence 1;
    #  xrange: this module targets Python 2)
    for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords):
        # Collapse rec.seq down to aligned size
        new_mol_seq = []
        is_beginning = True
        for aligned_col, char in zip(aligned_cols,
                                     replace_asterisks(rec.seq, index)):
            if aligned_col:
                is_beginning = False
                if char in '-.':
                    # deletion
                    new_mol_seq.append('-')
                else:
                    # aligned character
                    new_mol_seq.append(char.upper())
            else:
                # it's an insert or nothing
                # (also, skip any left-side inserts)
                if char not in '-.' and not is_beginning:
                    new_mol_seq.append(char.lower())
        rec.seq = ''.join(new_mol_seq)
        if do_iron:
            rec.seq = iron(rec.seq)
        block['sequences'].append(seqrecord2sequence(rec, qlen, index))
    return block
def iron(sequence):
    """'Iron out' indel regions in the aligned sequence.

    Any inserts next to deletions are converted to matches (uppercase).

    Given a CMA string like:
        AAAAbc--de-f--gAAA
    Result:
        AAAABCDEFgAAA

    Repeats a left-to-right scan until no lowercase insert is directly
    adjacent to a '-' gap.  The final assertion checks that no residues
    were added or lost -- only re-cased and re-positioned around gaps.
    """
    # A lowercase insert immediately before or after a gap ('-x' or 'x-').
    # NOTE(review): character class is [a-y], so a lowercase 'z' insert
    # would never trigger ironing -- confirm whether that is intentional.
    r_indel = re.compile(r'(-[a-y]|[a-y]-)')
    orig_sequence = sequence
    while r_indel.search(sequence):
        # Scan state: inside an insert run / inside a gap run.
        in_insert = False
        in_gap = False
        seen_gaps = 0   # pending '-' characters not yet emitted
        inserts = []    # pending lowercase insert characters
        outchars = []   # output accumulator
        for char in sequence:
            if in_insert:
                if char.islower():
                    # Extend the insert
                    inserts.append(char)
                elif char.isupper():
                    # Indel is over; 'iron' out & emit inserts, then gaps
                    in_insert = False
                    outchars.extend(inserts)
                    inserts = []
                    outchars.append('-' * seen_gaps)
                    seen_gaps = 0
                    outchars.append(char)
                else:
                    # Convert a preceding indel char to a 'match' (uppercase)
                    # If the indel and gap are both multiple chars, this will
                    # capitalize the insert left-to-right, then leave any gap
                    # remainer as-is.
                    assert char == '-'
                    if not inserts:
                        in_insert = False
                        in_gap = True
                        seen_gaps += 1
                    else:
                        outchars.append(inserts.pop(0).upper())
                        # NB: Only leave the insert region if we've finished
                        # converting all the insert chars
                        if not inserts:
                            in_insert = False
                            in_gap = True
            elif in_gap:
                if char.islower():
                    in_insert = True
                    in_gap = False
                    # If some inserts previously seen, emit them now
                    # If no inserts have been seen yet, we'll iron this indel
                    if inserts:
                        outchars.extend(inserts)
                        outchars.append('-' * seen_gaps)
                        seen_gaps = 0
                    inserts = [char]
                elif char.isupper():
                    in_gap = False
                    # End of the gap -- emit
                    if inserts:
                        outchars.extend(inserts)
                        inserts = []
                    outchars.append('-' * seen_gaps)
                    seen_gaps = 0
                    outchars.append(char)
                else:
                    # Extend the gap
                    assert char == '-'
                    seen_gaps += 1
            else:
                assert not inserts and not seen_gaps, (
                    "Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
                    % (inserts, seen_gaps, sequence, in_insert, in_gap))
                # Coming from Match state
                if char.isupper():
                    # Extend the match
                    outchars.append(char)
                elif char.islower():
                    inserts.append(char)
                    in_insert = True
                else:
                    assert char == '-'
                    seen_gaps += 1
                    in_gap = True
        # Emit any trailing indel
        if inserts:
            outchars.extend(inserts)
        if seen_gaps:
            outchars.append('-' * seen_gaps)
        sequence = ''.join(outchars)
        # logging.info(sequence)
    # Sanity check: ironing must preserve the residues exactly.
    assert (sequence.replace('-', '').upper()
            ==
            orig_sequence.replace('-', '').upper()), \
        '\nOrig: ' + orig_sequence + \
        '\nIron: ' + sequence
    return sequence
# --------------------------------------------------------------------
if __name__ == '__main__':
    # Test / smoke-check driver: parse each CMA file named on the command
    # line, print summary stats, and round-trip small files back to stdout.
    # NOTE(review): Python 2 print-statement syntax throughout; also the
    # relative import below only works when run as a module (python -m),
    # not as a standalone script -- confirm intended invocation.
    import sys
    from .utils import get_equivalent_positions
    cmafiles = sys.argv[1:]
    if not cmafiles:
        sys.exit("usage: cma.py <cmafile1> [<cmafile2> ...]")
    for fname in cmafiles:
        # Only the first block of each file is examined.
        block = next(parse(fname))
        print block['query_length'], "aa query"
        print len(block['sequences']), "sequences"
        print "of lengths:",
        for seq in block['sequences']:
            print "%d/%d(%d)" % (
                len(seq['seq']), seq['length'], seq['seq'].count('-')),
        print
        print " Equivalencies:"
        for idx in xrange(1, 20):
            print idx, '=', get_equivalent_positions(block)[idx]
        print
        print " Writing the file back out:"
        print
        # Skip very large alignments to keep the output readable.
        if len(block['sequences']) < 60:
            write(block, sys.stdout)
|
etal/biocma | biocma/cma.py | seqrecord2sequence | python | def seqrecord2sequence(record, qlen, index):
# aligned_len = sum(map(str.isupper, str(record.seq)))
# assert qlen == aligned_len, (
# "Aligned sequence length %s, query %s\n%s"
# % (aligned_len, qlen, str(record)))
description = (record.description.split(' ', 1)[1]
if ' ' in record.description
else ' ')
return dict(index=index,
id=record.id,
description=description,
dbxrefs={},
phylum='',
taxchar='',
head_len=None,
tail_len=None,
head_seq='',
tail_seq='',
length=len(record.seq) - record.seq.count('-'),
seq=str(record.seq),
) | Convert a Biopython SeqRecord to a esbglib.cma block.
Indels (gaps, casing) must have already been handled in the record. | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L392-L417 | null | #!/usr/bin/env python
"""Lower-level functionality for parsing CMA (.cma) files.
This handles .chn (CHAIN) files as a collection of CMA blocks, i.e. alignments.
"""
import itertools
import logging
import re
from . import sugar
def parse(infile):
    """Lazily yield the CMA alignment blocks found in *infile*.

    *infile* is passed to sugar.maybe_open -- presumably it accepts either
    a file name or an already-open handle (confirm in the sugar module).
    Each yielded value is a block dict as produced by _parse_blocks.
    """
    with sugar.maybe_open(infile) as instream:
        for block in _parse_blocks(instream):
            yield block
read = sugar.make_reader(parse)
# Removed
# parse_raw = parse
# --------------------------------------------------------------------
# Parse the cma into raw chunks
# Flow:
# if START:
# parse line 1
# parse line 2
# pass control to _parse_sequences
#
# _parse_sequences:
# read a non-blank line;
# if END: return (w/ all sequences accumulated)
# else:
# read 2 more non-blank lines
# parse the 3 read lines & bundle up data
# yield a seq
#
def _parse_blocks(instream):
    """Parse an alignment block from the given file handle.

    Block looks like:

    [0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
    (209)***********************************************...
    ... sequences, numbered 1-8 ...
    _0].

    Yields one dict per block with keys: level, one, name, params,
    query_length, query_chars, sequences.  Lines that do not start a
    block are skipped; a mismatch between the declared and parsed
    sequence count is only warned about, not fatal.
    """
    # sugar.unblank presumably filters out blank lines -- see sugar module.
    ilines = sugar.unblank(instream)
    for line in ilines:
        if line.startswith('['):
            # Start of block
            level, one, name, seqcount, params = _parse_block_header(line)
            qlen, qchars = _parse_block_postheader(next(ilines))
            # Pass control to the sequence parser
            sequences = list(_parse_sequences(ilines, qlen))
            # Validation
            if not len(sequences) == seqcount:
                logging.warn("Expected %d sequences in block %s, found %d",
                             seqcount, name, len(sequences))
            yield {'level': level,
                   'one': one,
                   'name': name,
                   # 'seqcount': seqcount,
                   'params': params,
                   'query_length': qlen,
                   'query_chars': qchars,
                   'sequences': sequences,
                   }
def _parse_sequences(ilines, expect_qlen):
    """Parse the sequences in the current block.

    Sequence looks like:

    $3=227(209):
    >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
    {()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*

    Consumes lines from *ilines* (three per sequence) until the block
    terminator ('_N].') is reached.  Unparseable sequences are skipped
    with a warning; length mismatches against *expect_qlen* or between
    declared and actual flank lengths are warned about but tolerated.
    Yields one dict per sequence.
    """
    while True:
        first = next(ilines)
        if first.startswith('_') and first.endswith('].'):
            # End of sequences & end of block
            break
        # ENH: handle wrapped lines?
        try:
            index, this_len, query_len = _parse_seq_preheader(first)
        except ValueError:
            logging.warn('Unparseable line (SKIPPING):\n%s', first)
            continue
        (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
        ) = _parse_seq_header(next(ilines))
        try:
            headseq, molseq, tailseq = _parse_seq_body(next(ilines))
        except ValueError:
            logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
            continue
        # Validation
        if expect_qlen != query_len:
            logging.warn("Query length in %s given as %d; expected %d",
                         rec_id, query_len, expect_qlen)
        # Reconcile flank sequences with their declared lengths.
        if not headseq and not headlen:
            headlen = 0
        if not tailseq and not taillen:
            taillen = 0
        if headseq:
            if headlen is None:
                headlen = len(headseq)
            elif headlen != len(headseq):
                logging.warn("Conflicting head flank lengths in %s: %d, %d",
                             rec_id, headlen, len(headseq))
        if tailseq:
            if taillen is None:
                taillen = len(tailseq)
            elif taillen != len(tailseq):
                logging.warn("Conflicting tail flank lengths in %s: %d, %d",
                             rec_id, taillen, len(tailseq))
        yield {'index': index,
               'id': rec_id,
               'description': description,
               'dbxrefs': dbxrefs,
               'phylum': phylum,
               'taxchar': taxchar,
               'head_len': headlen,
               'tail_len': taillen,
               'head_seq': headseq,
               'tail_seq': tailseq,
               'length': this_len,
               'seq': molseq,
               }
# Microparsing
def _parse_block_header(line):
"""
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
"""
level = line[1]
one, _rest = line[4:].split(')=', 1)
name, _rest = _rest.split('(', 1)
seqcount, _rest = _rest.split(')', 1)
params = _rest.strip('{}:')
# try:
# params = dict((key, float(val))
# for key, val in (pair.split('=')
# for pair in _rest[1:-2].split(',')))
# except ValueError:
# # Couldn't convert params to key-val pairs, for whatever reason
# logging.warn("Failed to parse CMA params: %s", _rest[1:-2])
# params = {}
return int(level), int(one), name, int(seqcount), params
def _parse_block_postheader(line):
"""
(209)**************!*****************!!*************...
"""
parts = line[1:].split(')', 1)
qlen = int(parts[0])
if not len(parts[1]) == qlen:
logging.warn("postheader expected %d-long query, found %d",
qlen, len(parts[1]))
return qlen, parts[1]
def _parse_seq_preheader(line):
"""
$3=227(209):
"""
match = re.match(r"\$ (\d+) = (\d+) \( (\d+) \):", line, re.VERBOSE)
if not match:
raise ValueError("Unparseable header: " + line)
index, this_len, query_len = match.groups()
return map(int, (index, this_len, query_len))
def _parse_seq_header(line):
    """Unique ID, head/tail lengths and taxonomy info from a sequence header.

    The description is the part of the FASTA/CMA sequence header starting after
    the first space (i.e. excluding ID), to the end of the line.

    This function looks inside the first '{...}' pair to extract info.

    Ex:

    >consensus seq
    >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
    >gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C

    Returns a 7-tuple:
    (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description)
    where headlen/taillen are ints or None and phylum/taxchar default
    to '' when absent.
    """
    # ENH: use the two functions in esbglib.parseutils
    # or, move one or both of those functions into here
    _parts = line[1:].split(None, 1)
    rec_id = _parts[0]
    descr = _parts[1] if _parts[1:] else ''
    # Database cross references: pair up alternating key|value fields of
    # a pipe-delimited ID (e.g. gi|15606894|ref|NP_214275.1).
    dbxrefs = {}
    if '|' in rec_id:
        id_gen = iter(rec_id.rstrip('|').split('|'))
        for key in id_gen:
            try:
                dbxrefs[key] = next(id_gen)
            except StopIteration:
                # Odd number of fields: last key has no value.
                break
    # Head/tail lengths and taxonomy codes
    headlen = taillen = None
    phylum = taxchar = ''
    if descr.startswith('{'):
        _deets, description = descr[1:].split('}', 1)
        # Both groups are optional: '|head(tail)|' and '<Phylum(T)>'.
        match = re.search(r"""
            (?:
                \| (?P<headlen> \d+)
                \( (?P<taillen> \d+)
                \)
                \|
            )?
            (?:
                < (?P<phylum> .+?)
                \( (?P<taxchar> \w)
                \)
                >
            )?
            """, _deets, re.VERBOSE)
        if match:
            headlen, taillen, phylum, taxchar = match.groups()
            if headlen is not None:
                headlen = int(headlen)
            if taillen is not None:
                taillen = int(taillen)
            if phylum is None:
                phylum = ''
            if taxchar is None:
                taxchar = ''
        else:
            logging.warn("Couldn't match head/tail: %s", _deets)
    else:
        description = descr
    # TODO - return a dictionary here, update it in _parse_sequences
    return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
def _parse_seq_body(line):
"""
Ex:
{()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
Or:
{(HY)ELPWVEKYR...
The sequence fragments in parentheses represent N- or C-terminal flanking
regions that are not part of the alignment block (I think). Most tools don't
include them, but some do, apparently.
"""
line = line.rstrip('*')
if '{()' in line:
head, _rest = line.split('{()', 1)
else:
# Match parens
_rest = line.split('{(', 1)[1]
head, _rest = _rest.split(')', 1)
if '()}' in _rest:
molseq, tail = _rest.split('()}', 1)
else:
# Match parens
molseq, _rest = _rest.split('(', 1)
tail = _rest.split(')}', 1)[0]
return (head, molseq, tail)
# --------------------------------------------------------------------
# Write
def write(blocks, outfile):
    """Write one block, or an iterable of blocks, to *outfile* in CMA format.

    A single block dict (recognized by its 'sequences' key) is wrapped in
    a list.  *outfile* is passed to sugar.maybe_open -- presumably a file
    name or an open handle (confirm in the sugar module).
    """
    if isinstance(blocks, dict) and 'sequences' in blocks:
        blocks = [blocks]
    with sugar.maybe_open(outfile, 'w+') as outstream:
        outstream.writelines(
            itertools.chain(*map(_format_block, blocks)))
def _format_block(block):
    """Yield the CMA-formatted text chunks for one block dict.

    Emits the block header (with the sequence count recomputed from the
    actual sequence list), then each formatted sequence, then the
    block-closing line.
    """
    # Block header
    # [0_(1)=structs.seq(12){go=19,gx=2,pn=5.0,lf=0,rf=0}:
    # (69)*****!***************************!!!******!******************!*******
    yield """\
[{level}_({one})={name}({seqcount}){{{params}}}:
({query_length}){query_chars}
""".format(
        seqcount=len(block['sequences']),
        # fixed_params=','.join('{0}={1}'.format(key, val)
        # for key, val in block['params'].iteritems()),
        **block)
    # Sequences
    for s in _format_sequences(block['sequences'], block['query_length']):
        yield s
    # Block close
    yield "_%s].\n" % block['level']
def _format_sequences(sequences, query_length):
    """Yield the CMA-formatted text for each sequence dict.

    NOTE(review): renumbers each dict's 'index' key in place (1-based)
    as a side effect of formatting.
    """
    # $1=254(255):
    # >2QG5|A {|27(6)|}
    # {()YTLENTIGRGSWGEVKIAVQKGTRIRRAAKKIPKYFV---EDVDRFKQEIEIMKSLDHPNIIRLYETFEDNTDIYLVMELCTGGELFERVVHKRVFRESDAARIMKDVLSAVAYCHKLNVAHRDLKPENFLFltdSPDSPLKLIDFGLAARFkpGKMMRTKVGTPYYVSPQVLEGL-YGPECDEWSAGVMMYVLLCGYPPFSAPTDXEVMLKIREGTFtfpeKDWLNVSPQAESLIRRLLTKSPKQRIT-----SLQALEHEW-()}*
    for idx, seq in enumerate(sequences):
        # Optional '{|head(tail)|<Phylum(T)>}' prefix of the header line;
        # omitted entirely when neither part is present.
        head_tail = ("|{head_len}({tail_len})|".format(**seq)
                     if seq['head_len'] or seq['tail_len'] else "")
        taxonomy=("<{phylum}({taxchar})>".format(**seq)
                  if seq['phylum'] and seq['taxchar'] else "")
        special_header = ("{{{0}{1}}}".format(head_tail, taxonomy)
                          if head_tail or taxonomy else "")
        seq['index'] = idx + 1
        yield """\
${index}={length}({0}):
>{id} {1}{description}
{head_seq}{{(){seq}()}}{tail_seq}*
""".format(query_length, special_header, **seq)
# --------------------------------------------------------------------
# Utilities
def realign_seqs(block, gap_char='.', align_indels=False):
"""Add gaps to a block so all residues in a column are equivalent.
Given a block, containing a list of "sequences" (dicts) each containing a
"seq" (actual string sequence, where upper=match, lower=insert, dash=gap),
insert gaps (- or .) into the sequences s.t.
1. columns line up properly, and
2. all resulting sequences have the same length
The reason this needs to be done is that the query/consensus sequence is not
assigned gaps to account for inserts in the other sequences. We need to add
the gaps back to obtain a normal alignment.
`return`: a list of realigned sequence strings.
"""
# ENH: align inserts using an external tool (if align_indels)
all_chars = [list(sq['seq']) for sq in block['sequences']]
# NB: If speed is an issue here, consider Numpy or Cython
# main problem: list.insert is O(n) -- would OrderedDict help?
nrows = len(all_chars)
i = 0
while i < len(all_chars[0]):
rows_need_gaps = [r for r in all_chars if not r[i].islower()]
if len(rows_need_gaps) != nrows:
for row in rows_need_gaps:
row.insert(i, gap_char)
i += 1
return [''.join(row) for row in all_chars]
def consensus2block(record, level=0, name=None):
    """Convert a Biopython SeqRecord to a esbglib.cma block.

    Ungapping is handled here.

    `record`: the consensus SeqRecord; its `seq` attribute is REPLACED in
        place with the ungapped, uppercased string.
    `level`: block level written into the header.
    `name`: block name; defaults to the record's id.

    Returns a block dict whose single sequence (index 1) is the consensus.
    """
    cons_ungap = str(record.seq).replace('-', '').replace('.', '').upper()
    record.seq = cons_ungap
    return dict(
        level=level, #record.annotations.get('level', 0),
        one=1,
        name=name or record.id,
        # Default gap/penalty parameters, emitted verbatim in the header.
        params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
        query_length=len(cons_ungap),
        query_chars='*'*len(cons_ungap),
        sequences=[seqrecord2sequence(record, len(cons_ungap), 1)]
    )
def replace_asterisks(seq, label=None):
if '*' in seq:
logging.warn("Sequence %scontains '*' character; replacing with 'X'",
str(label) + ' ' if label else '')
return str(seq).replace('*', 'X')
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
"""Opposite of realign_seqs.
Input sequences should all be the same length.
The first record must be the consensus.
"""
level = 0
name = seqrecords[0].id
# If this is a CMA alignment, extract additional info:
if hasattr(seqrecords, '_records'):
if hasattr(seqrecords, 'level'):
level = seqrecords.level
if hasattr(seqrecords, 'name'):
name = seqrecords.name
seqrecords = seqrecords._records
consensus = seqrecords.pop(0)
cons_length = len(consensus)
for i, s in enumerate(seqrecords):
if len(s) != cons_length:
raise ValueError(
"Sequence #%d has length %d, consensus is %d"
% (i+2, len(s), cons_length))
if '.' in str(consensus.seq):
# Strict -- error if there's a '-'
if '-' in str(consensus.seq):
if strict:
raise ValueError("Consensus contains '-' gap characters")
logging.warn("Consensus sequence contains both '.' and '-' gap "
"characters -- is it really the consensus?")
aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
else:
aligned_cols = [c != '.' for c in str(consensus.seq)]
else:
# A little more ambiguous...
aligned_cols = [c != '-' for c in str(consensus.seq)]
consensus.seq = replace_asterisks(consensus.seq, 'consensus')
# Start a block with the consensus sequence
block = consensus2block(consensus, level=level, name=name)
qlen = block['query_length']
# Collapse & add remaining sequences to the block
for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords):
# Collapse rec.seq down to aligned size
new_mol_seq = []
is_beginning = True
for aligned_col, char in zip(aligned_cols,
replace_asterisks(rec.seq, index)):
if aligned_col:
is_beginning = False
if char in '-.':
# deletion
new_mol_seq.append('-')
else:
# aligned character
new_mol_seq.append(char.upper())
else:
# it's an insert or nothing
# (also, skip any left-side inserts)
if char not in '-.' and not is_beginning:
new_mol_seq.append(char.lower())
rec.seq = ''.join(new_mol_seq)
if do_iron:
rec.seq = iron(rec.seq)
block['sequences'].append(seqrecord2sequence(rec, qlen, index))
return block
def iron(sequence):
"""'Iron out' indel regions in the aligned sequence.
Any inserts next to deletions are converted to matches (uppercase).
Given a CMA string like:
AAAAbc--de-f--gAAA
Result:
AAAABCDEFgAAA
"""
r_indel = re.compile(r'(-[a-y]|[a-y]-)')
orig_sequence = sequence
while r_indel.search(sequence):
in_insert = False
in_gap = False
seen_gaps = 0
inserts = []
outchars = []
for char in sequence:
if in_insert:
if char.islower():
# Extend the insert
inserts.append(char)
elif char.isupper():
# Indel is over; 'iron' out & emit inserts, then gaps
in_insert = False
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Convert a preceding indel char to a 'match' (uppercase)
# If the indel and gap are both multiple chars, this will
# capitalize the insert left-to-right, then leave any gap
# remainer as-is.
assert char == '-'
if not inserts:
in_insert = False
in_gap = True
seen_gaps += 1
else:
outchars.append(inserts.pop(0).upper())
# NB: Only leave the insert region if we've finished
# converting all the insert chars
if not inserts:
in_insert = False
in_gap = True
elif in_gap:
if char.islower():
in_insert = True
in_gap = False
# If some inserts previously seen, emit them now
# If no inserts have been seen yet, we'll iron this indel
if inserts:
outchars.extend(inserts)
outchars.append('-' * seen_gaps)
seen_gaps = 0
inserts = [char]
elif char.isupper():
in_gap = False
# End of the gap -- emit
if inserts:
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Extend the gap
assert char == '-'
seen_gaps += 1
else:
assert not inserts and not seen_gaps, (
"Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
% (inserts, seen_gaps, sequence, in_insert, in_gap))
# Coming from Match state
if char.isupper():
# Extend the match
outchars.append(char)
elif char.islower():
inserts.append(char)
in_insert = True
else:
assert char == '-'
seen_gaps += 1
in_gap = True
# Emit any trailing indel
if inserts:
outchars.extend(inserts)
if seen_gaps:
outchars.append('-' * seen_gaps)
sequence = ''.join(outchars)
# logging.info(sequence)
assert (sequence.replace('-', '').upper()
==
orig_sequence.replace('-', '').upper()), \
'\nOrig: ' + orig_sequence + \
'\nIron: ' + sequence
return sequence
# --------------------------------------------------------------------
if __name__ == '__main__':
# Test
import sys
from .utils import get_equivalent_positions
cmafiles = sys.argv[1:]
if not cmafiles:
sys.exit("usage: cma.py <cmafile1> [<cmafile2> ...]")
for fname in cmafiles:
block = next(parse(fname))
print block['query_length'], "aa query"
print len(block['sequences']), "sequences"
print "of lengths:",
for seq in block['sequences']:
print "%d/%d(%d)" % (
len(seq['seq']), seq['length'], seq['seq'].count('-')),
print
print " Equivalencies:"
for idx in xrange(1, 20):
print idx, '=', get_equivalent_positions(block)[idx]
print
print " Writing the file back out:"
print
if len(block['sequences']) < 60:
write(block, sys.stdout)
|
etal/biocma | biocma/cma.py | collapse_to_consensus | python | def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
level = 0
name = seqrecords[0].id
# If this is a CMA alignment, extract additional info:
if hasattr(seqrecords, '_records'):
if hasattr(seqrecords, 'level'):
level = seqrecords.level
if hasattr(seqrecords, 'name'):
name = seqrecords.name
seqrecords = seqrecords._records
consensus = seqrecords.pop(0)
cons_length = len(consensus)
for i, s in enumerate(seqrecords):
if len(s) != cons_length:
raise ValueError(
"Sequence #%d has length %d, consensus is %d"
% (i+2, len(s), cons_length))
if '.' in str(consensus.seq):
# Strict -- error if there's a '-'
if '-' in str(consensus.seq):
if strict:
raise ValueError("Consensus contains '-' gap characters")
logging.warn("Consensus sequence contains both '.' and '-' gap "
"characters -- is it really the consensus?")
aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
else:
aligned_cols = [c != '.' for c in str(consensus.seq)]
else:
# A little more ambiguous...
aligned_cols = [c != '-' for c in str(consensus.seq)]
consensus.seq = replace_asterisks(consensus.seq, 'consensus')
# Start a block with the consensus sequence
block = consensus2block(consensus, level=level, name=name)
qlen = block['query_length']
# Collapse & add remaining sequences to the block
for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords):
# Collapse rec.seq down to aligned size
new_mol_seq = []
is_beginning = True
for aligned_col, char in zip(aligned_cols,
replace_asterisks(rec.seq, index)):
if aligned_col:
is_beginning = False
if char in '-.':
# deletion
new_mol_seq.append('-')
else:
# aligned character
new_mol_seq.append(char.upper())
else:
# it's an insert or nothing
# (also, skip any left-side inserts)
if char not in '-.' and not is_beginning:
new_mol_seq.append(char.lower())
rec.seq = ''.join(new_mol_seq)
if do_iron:
rec.seq = iron(rec.seq)
block['sequences'].append(seqrecord2sequence(rec, qlen, index))
return block | Opposite of realign_seqs.
Input sequences should all be the same length.
The first record must be the consensus. | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L426-L495 | [
"def iron(sequence):\n \"\"\"'Iron out' indel regions in the aligned sequence.\n\n Any inserts next to deletions are converted to matches (uppercase).\n\n Given a CMA string like:\n AAAAbc--de-f--gAAA\n Result:\n AAAABCDEFgAAA\n \"\"\"\n r_indel = re.compile(r'(-[a-y]|[a-y]-)')\n orig_sequence = sequence\n while r_indel.search(sequence):\n in_insert = False\n in_gap = False\n seen_gaps = 0\n inserts = []\n outchars = []\n\n for char in sequence:\n if in_insert:\n if char.islower():\n # Extend the insert\n inserts.append(char)\n elif char.isupper():\n # Indel is over; 'iron' out & emit inserts, then gaps\n in_insert = False\n outchars.extend(inserts)\n inserts = []\n outchars.append('-' * seen_gaps)\n seen_gaps = 0\n outchars.append(char)\n else:\n # Convert a preceding indel char to a 'match' (uppercase)\n # If the indel and gap are both multiple chars, this will\n # capitalize the insert left-to-right, then leave any gap\n # remainer as-is.\n assert char == '-'\n if not inserts:\n in_insert = False\n in_gap = True\n seen_gaps += 1\n else:\n outchars.append(inserts.pop(0).upper())\n # NB: Only leave the insert region if we've finished\n # converting all the insert chars\n if not inserts:\n in_insert = False\n in_gap = True\n\n elif in_gap:\n if char.islower():\n in_insert = True\n in_gap = False\n # If some inserts previously seen, emit them now\n # If no inserts have been seen yet, we'll iron this indel\n if inserts:\n outchars.extend(inserts)\n outchars.append('-' * seen_gaps)\n seen_gaps = 0\n inserts = [char]\n elif char.isupper():\n in_gap = False\n # End of the gap -- emit\n if inserts:\n outchars.extend(inserts)\n inserts = []\n outchars.append('-' * seen_gaps)\n seen_gaps = 0\n outchars.append(char)\n else:\n # Extend the gap\n assert char == '-'\n seen_gaps += 1\n\n else:\n assert not inserts and not seen_gaps, (\n \"Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s\"\n % (inserts, seen_gaps, sequence, in_insert, in_gap))\n # Coming from Match state\n 
if char.isupper():\n # Extend the match\n outchars.append(char)\n elif char.islower():\n inserts.append(char)\n in_insert = True\n else:\n assert char == '-'\n seen_gaps += 1\n in_gap = True\n\n # Emit any trailing indel\n if inserts:\n outchars.extend(inserts)\n if seen_gaps:\n outchars.append('-' * seen_gaps)\n sequence = ''.join(outchars)\n # logging.info(sequence)\n assert (sequence.replace('-', '').upper()\n ==\n orig_sequence.replace('-', '').upper()), \\\n '\\nOrig: ' + orig_sequence + \\\n '\\nIron: ' + sequence\n return sequence\n",
"def consensus2block(record, level=0, name=None):\n \"\"\"Convert a Biopython SeqRecord to a esbglib.cma block.\n\n Ungapping is handled here.\n \"\"\"\n cons_ungap = str(record.seq).replace('-', '').replace('.', '').upper()\n record.seq = cons_ungap\n return dict(\n level=level, #record.annotations.get('level', 0),\n one=1,\n name=name or record.id,\n params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',\n query_length=len(cons_ungap),\n query_chars='*'*len(cons_ungap),\n sequences=[seqrecord2sequence(record, len(cons_ungap), 1)]\n )\n",
"def seqrecord2sequence(record, qlen, index):\n \"\"\"Convert a Biopython SeqRecord to a esbglib.cma block.\n\n Indels (gaps, casing) must have already been handled in the record.\n \"\"\"\n # aligned_len = sum(map(str.isupper, str(record.seq)))\n # assert qlen == aligned_len, (\n # \"Aligned sequence length %s, query %s\\n%s\"\n # % (aligned_len, qlen, str(record)))\n\n description = (record.description.split(' ', 1)[1]\n if ' ' in record.description\n else ' ')\n return dict(index=index,\n id=record.id,\n description=description,\n dbxrefs={},\n phylum='',\n taxchar='',\n head_len=None,\n tail_len=None,\n head_seq='',\n tail_seq='',\n length=len(record.seq) - record.seq.count('-'),\n seq=str(record.seq),\n )\n",
"def replace_asterisks(seq, label=None):\n if '*' in seq:\n logging.warn(\"Sequence %scontains '*' character; replacing with 'X'\",\n str(label) + ' ' if label else '')\n return str(seq).replace('*', 'X')\n"
] | #!/usr/bin/env python
"""Lower-level functionality for parsing CMA (.cma) files.
This handles .chn (CHAIN) files as a collection of CMA blocks, i.e. alignments.
"""
import itertools
import logging
import re
from . import sugar
def parse(infile):
with sugar.maybe_open(infile) as instream:
for block in _parse_blocks(instream):
yield block
read = sugar.make_reader(parse)
# Removed
# parse_raw = parse
# --------------------------------------------------------------------
# Parse the cma into raw chunks
# Flow:
# if START:
# parse line 1
# parse line 2
# pass control to _parse_sequences
#
# _parse_sequences:
# read a non-blank line;
# if END: return (w/ all sequences accumulated)
# else:
# read 2 more non-blank lines
# parse the 3 read lines & bundle up data
# yield a seq
#
def _parse_blocks(instream):
"""Parse an alignment block from the given file handle.
Block looks like:
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
(209)***********************************************...
... sequences, numbered 1-8 ...
_0].
"""
ilines = sugar.unblank(instream)
for line in ilines:
if line.startswith('['):
# Start of block
level, one, name, seqcount, params = _parse_block_header(line)
qlen, qchars = _parse_block_postheader(next(ilines))
# Pass control to the sequence parser
sequences = list(_parse_sequences(ilines, qlen))
# Validation
if not len(sequences) == seqcount:
logging.warn("Expected %d sequences in block %s, found %d",
seqcount, name, len(sequences))
yield {'level': level,
'one': one,
'name': name,
# 'seqcount': seqcount,
'params': params,
'query_length': qlen,
'query_chars': qchars,
'sequences': sequences,
}
def _parse_sequences(ilines, expect_qlen):
"""Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
"""
while True:
first = next(ilines)
if first.startswith('_') and first.endswith('].'):
# End of sequences & end of block
break
# ENH: handle wrapped lines?
try:
index, this_len, query_len = _parse_seq_preheader(first)
except ValueError:
logging.warn('Unparseable line (SKIPPING):\n%s', first)
continue
(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
) = _parse_seq_header(next(ilines))
try:
headseq, molseq, tailseq = _parse_seq_body(next(ilines))
except ValueError:
logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
continue
# Validation
if expect_qlen != query_len:
logging.warn("Query length in %s given as %d; expected %d",
rec_id, query_len, expect_qlen)
if not headseq and not headlen:
headlen = 0
if not tailseq and not taillen:
taillen = 0
if headseq:
if headlen is None:
headlen = len(headseq)
elif headlen != len(headseq):
logging.warn("Conflicting head flank lengths in %s: %d, %d",
rec_id, headlen, len(headseq))
if tailseq:
if taillen is None:
taillen = len(tailseq)
elif taillen != len(tailseq):
logging.warn("Conflicting tail flank lengths in %s: %d, %d",
rec_id, taillen, len(tailseq))
yield {'index': index,
'id': rec_id,
'description': description,
'dbxrefs': dbxrefs,
'phylum': phylum,
'taxchar': taxchar,
'head_len': headlen,
'tail_len': taillen,
'head_seq': headseq,
'tail_seq': tailseq,
'length': this_len,
'seq': molseq,
}
# Microparsing
def _parse_block_header(line):
"""
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
"""
level = line[1]
one, _rest = line[4:].split(')=', 1)
name, _rest = _rest.split('(', 1)
seqcount, _rest = _rest.split(')', 1)
params = _rest.strip('{}:')
# try:
# params = dict((key, float(val))
# for key, val in (pair.split('=')
# for pair in _rest[1:-2].split(',')))
# except ValueError:
# # Couldn't convert params to key-val pairs, for whatever reason
# logging.warn("Failed to parse CMA params: %s", _rest[1:-2])
# params = {}
return int(level), int(one), name, int(seqcount), params
def _parse_block_postheader(line):
"""
(209)**************!*****************!!*************...
"""
parts = line[1:].split(')', 1)
qlen = int(parts[0])
if not len(parts[1]) == qlen:
logging.warn("postheader expected %d-long query, found %d",
qlen, len(parts[1]))
return qlen, parts[1]
def _parse_seq_preheader(line):
"""
$3=227(209):
"""
match = re.match(r"\$ (\d+) = (\d+) \( (\d+) \):", line, re.VERBOSE)
if not match:
raise ValueError("Unparseable header: " + line)
index, this_len, query_len = match.groups()
return map(int, (index, this_len, query_len))
def _parse_seq_header(line):
"""Unique ID, head/tail lengths and taxonomy info from a sequence header.
The description is the part of the FASTA/CMA sequence header starting after
the first space (i.e. excluding ID), to the end of the line.
This function looks inside the first '{...}' pair to extract info.
Ex:
>consensus seq
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
>gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C
"""
# ENH: use the two functions in esbglib.parseutils
# or, move one or both of those functions into here
_parts = line[1:].split(None, 1)
rec_id = _parts[0]
descr = _parts[1] if _parts[1:] else ''
# Database cross references
dbxrefs = {}
if '|' in rec_id:
id_gen = iter(rec_id.rstrip('|').split('|'))
for key in id_gen:
try:
dbxrefs[key] = next(id_gen)
except StopIteration:
break
# Head/tail lengths and taxonomy codes
headlen = taillen = None
phylum = taxchar = ''
if descr.startswith('{'):
_deets, description = descr[1:].split('}', 1)
match = re.search(r"""
(?:
\| (?P<headlen> \d+)
\( (?P<taillen> \d+)
\)
\|
)?
(?:
< (?P<phylum> .+?)
\( (?P<taxchar> \w)
\)
>
)?
""", _deets, re.VERBOSE)
if match:
headlen, taillen, phylum, taxchar = match.groups()
if headlen is not None:
headlen = int(headlen)
if taillen is not None:
taillen = int(taillen)
if phylum is None:
phylum = ''
if taxchar is None:
taxchar = ''
else:
logging.warn("Couldn't match head/tail: %s", _deets)
else:
description = descr
# TODO - return a dictionary here, update it in _parse_sequences
return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
def _parse_seq_body(line):
"""
Ex:
{()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
Or:
{(HY)ELPWVEKYR...
The sequence fragments in parentheses represent N- or C-terminal flanking
regions that are not part of the alignment block (I think). Most tools don't
include them, but some do, apparently.
"""
line = line.rstrip('*')
if '{()' in line:
head, _rest = line.split('{()', 1)
else:
# Match parens
_rest = line.split('{(', 1)[1]
head, _rest = _rest.split(')', 1)
if '()}' in _rest:
molseq, tail = _rest.split('()}', 1)
else:
# Match parens
molseq, _rest = _rest.split('(', 1)
tail = _rest.split(')}', 1)[0]
return (head, molseq, tail)
# --------------------------------------------------------------------
# Write
def write(blocks, outfile):
if isinstance(blocks, dict) and 'sequences' in blocks:
blocks = [blocks]
with sugar.maybe_open(outfile, 'w+') as outstream:
outstream.writelines(
itertools.chain(*map(_format_block, blocks)))
def _format_block(block):
# Block header
# [0_(1)=structs.seq(12){go=19,gx=2,pn=5.0,lf=0,rf=0}:
# (69)*****!***************************!!!******!******************!*******
yield """\
[{level}_({one})={name}({seqcount}){{{params}}}:
({query_length}){query_chars}
""".format(
seqcount=len(block['sequences']),
# fixed_params=','.join('{0}={1}'.format(key, val)
# for key, val in block['params'].iteritems()),
**block)
# Sequences
for s in _format_sequences(block['sequences'], block['query_length']):
yield s
# Block close
yield "_%s].\n" % block['level']
def _format_sequences(sequences, query_length):
# $1=254(255):
# >2QG5|A {|27(6)|}
# {()YTLENTIGRGSWGEVKIAVQKGTRIRRAAKKIPKYFV---EDVDRFKQEIEIMKSLDHPNIIRLYETFEDNTDIYLVMELCTGGELFERVVHKRVFRESDAARIMKDVLSAVAYCHKLNVAHRDLKPENFLFltdSPDSPLKLIDFGLAARFkpGKMMRTKVGTPYYVSPQVLEGL-YGPECDEWSAGVMMYVLLCGYPPFSAPTDXEVMLKIREGTFtfpeKDWLNVSPQAESLIRRLLTKSPKQRIT-----SLQALEHEW-()}*
for idx, seq in enumerate(sequences):
head_tail = ("|{head_len}({tail_len})|".format(**seq)
if seq['head_len'] or seq['tail_len'] else "")
taxonomy=("<{phylum}({taxchar})>".format(**seq)
if seq['phylum'] and seq['taxchar'] else "")
special_header = ("{{{0}{1}}}".format(head_tail, taxonomy)
if head_tail or taxonomy else "")
seq['index'] = idx + 1
yield """\
${index}={length}({0}):
>{id} {1}{description}
{head_seq}{{(){seq}()}}{tail_seq}*
""".format(query_length, special_header, **seq)
# --------------------------------------------------------------------
# Utilities
def realign_seqs(block, gap_char='.', align_indels=False):
"""Add gaps to a block so all residues in a column are equivalent.
Given a block, containing a list of "sequences" (dicts) each containing a
"seq" (actual string sequence, where upper=match, lower=insert, dash=gap),
insert gaps (- or .) into the sequences s.t.
1. columns line up properly, and
2. all resulting sequences have the same length
The reason this needs to be done is that the query/consensus sequence is not
assigned gaps to account for inserts in the other sequences. We need to add
the gaps back to obtain a normal alignment.
`return`: a list of realigned sequence strings.
"""
# ENH: align inserts using an external tool (if align_indels)
all_chars = [list(sq['seq']) for sq in block['sequences']]
# NB: If speed is an issue here, consider Numpy or Cython
# main problem: list.insert is O(n) -- would OrderedDict help?
nrows = len(all_chars)
i = 0
while i < len(all_chars[0]):
rows_need_gaps = [r for r in all_chars if not r[i].islower()]
if len(rows_need_gaps) != nrows:
for row in rows_need_gaps:
row.insert(i, gap_char)
i += 1
return [''.join(row) for row in all_chars]
def consensus2block(record, level=0, name=None):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Ungapping is handled here.
"""
cons_ungap = str(record.seq).replace('-', '').replace('.', '').upper()
record.seq = cons_ungap
return dict(
level=level, #record.annotations.get('level', 0),
one=1,
name=name or record.id,
params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
query_length=len(cons_ungap),
query_chars='*'*len(cons_ungap),
sequences=[seqrecord2sequence(record, len(cons_ungap), 1)]
)
def seqrecord2sequence(record, qlen, index):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Indels (gaps, casing) must have already been handled in the record.
"""
# aligned_len = sum(map(str.isupper, str(record.seq)))
# assert qlen == aligned_len, (
# "Aligned sequence length %s, query %s\n%s"
# % (aligned_len, qlen, str(record)))
description = (record.description.split(' ', 1)[1]
if ' ' in record.description
else ' ')
return dict(index=index,
id=record.id,
description=description,
dbxrefs={},
phylum='',
taxchar='',
head_len=None,
tail_len=None,
head_seq='',
tail_seq='',
length=len(record.seq) - record.seq.count('-'),
seq=str(record.seq),
)
def replace_asterisks(seq, label=None):
if '*' in seq:
logging.warn("Sequence %scontains '*' character; replacing with 'X'",
str(label) + ' ' if label else '')
return str(seq).replace('*', 'X')
def iron(sequence):
"""'Iron out' indel regions in the aligned sequence.
Any inserts next to deletions are converted to matches (uppercase).
Given a CMA string like:
AAAAbc--de-f--gAAA
Result:
AAAABCDEFgAAA
"""
r_indel = re.compile(r'(-[a-y]|[a-y]-)')
orig_sequence = sequence
while r_indel.search(sequence):
in_insert = False
in_gap = False
seen_gaps = 0
inserts = []
outchars = []
for char in sequence:
if in_insert:
if char.islower():
# Extend the insert
inserts.append(char)
elif char.isupper():
# Indel is over; 'iron' out & emit inserts, then gaps
in_insert = False
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Convert a preceding indel char to a 'match' (uppercase)
# If the indel and gap are both multiple chars, this will
# capitalize the insert left-to-right, then leave any gap
# remainer as-is.
assert char == '-'
if not inserts:
in_insert = False
in_gap = True
seen_gaps += 1
else:
outchars.append(inserts.pop(0).upper())
# NB: Only leave the insert region if we've finished
# converting all the insert chars
if not inserts:
in_insert = False
in_gap = True
elif in_gap:
if char.islower():
in_insert = True
in_gap = False
# If some inserts previously seen, emit them now
# If no inserts have been seen yet, we'll iron this indel
if inserts:
outchars.extend(inserts)
outchars.append('-' * seen_gaps)
seen_gaps = 0
inserts = [char]
elif char.isupper():
in_gap = False
# End of the gap -- emit
if inserts:
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Extend the gap
assert char == '-'
seen_gaps += 1
else:
assert not inserts and not seen_gaps, (
"Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
% (inserts, seen_gaps, sequence, in_insert, in_gap))
# Coming from Match state
if char.isupper():
# Extend the match
outchars.append(char)
elif char.islower():
inserts.append(char)
in_insert = True
else:
assert char == '-'
seen_gaps += 1
in_gap = True
# Emit any trailing indel
if inserts:
outchars.extend(inserts)
if seen_gaps:
outchars.append('-' * seen_gaps)
sequence = ''.join(outchars)
# logging.info(sequence)
assert (sequence.replace('-', '').upper()
==
orig_sequence.replace('-', '').upper()), \
'\nOrig: ' + orig_sequence + \
'\nIron: ' + sequence
return sequence
# --------------------------------------------------------------------
if __name__ == '__main__':
# Test
import sys
from .utils import get_equivalent_positions
cmafiles = sys.argv[1:]
if not cmafiles:
sys.exit("usage: cma.py <cmafile1> [<cmafile2> ...]")
for fname in cmafiles:
block = next(parse(fname))
print block['query_length'], "aa query"
print len(block['sequences']), "sequences"
print "of lengths:",
for seq in block['sequences']:
print "%d/%d(%d)" % (
len(seq['seq']), seq['length'], seq['seq'].count('-')),
print
print " Equivalencies:"
for idx in xrange(1, 20):
print idx, '=', get_equivalent_positions(block)[idx]
print
print " Writing the file back out:"
print
if len(block['sequences']) < 60:
write(block, sys.stdout)
|
etal/biocma | biocma/cma.py | iron | python | def iron(sequence):
"""'Iron out' indel regions in the aligned sequence.
Any inserts next to deletions are converted to matches (uppercase).
Given a CMA string like:
AAAAbc--de-f--gAAA
Result:
AAAABCDEFgAAA
"""
r_indel = re.compile(r'(-[a-y]|[a-y]-)')
orig_sequence = sequence
while r_indel.search(sequence):
in_insert = False
in_gap = False
seen_gaps = 0
inserts = []
outchars = []
for char in sequence:
if in_insert:
if char.islower():
# Extend the insert
inserts.append(char)
elif char.isupper():
# Indel is over; 'iron' out & emit inserts, then gaps
in_insert = False
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Convert a preceding indel char to a 'match' (uppercase)
# If the indel and gap are both multiple chars, this will
# capitalize the insert left-to-right, then leave any gap
# remainer as-is.
assert char == '-'
if not inserts:
in_insert = False
in_gap = True
seen_gaps += 1
else:
outchars.append(inserts.pop(0).upper())
# NB: Only leave the insert region if we've finished
# converting all the insert chars
if not inserts:
in_insert = False
in_gap = True
elif in_gap:
if char.islower():
in_insert = True
in_gap = False
# If some inserts previously seen, emit them now
# If no inserts have been seen yet, we'll iron this indel
if inserts:
outchars.extend(inserts)
outchars.append('-' * seen_gaps)
seen_gaps = 0
inserts = [char]
elif char.isupper():
in_gap = False
# End of the gap -- emit
if inserts:
outchars.extend(inserts)
inserts = []
outchars.append('-' * seen_gaps)
seen_gaps = 0
outchars.append(char)
else:
# Extend the gap
assert char == '-'
seen_gaps += 1
else:
assert not inserts and not seen_gaps, (
"Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
% (inserts, seen_gaps, sequence, in_insert, in_gap))
# Coming from Match state
if char.isupper():
# Extend the match
outchars.append(char)
elif char.islower():
inserts.append(char)
in_insert = True
else:
assert char == '-'
seen_gaps += 1
in_gap = True
# Emit any trailing indel
if inserts:
outchars.extend(inserts)
if seen_gaps:
outchars.append('-' * seen_gaps)
sequence = ''.join(outchars)
# logging.info(sequence)
assert (sequence.replace('-', '').upper()
==
orig_sequence.replace('-', '').upper()), \
'\nOrig: ' + orig_sequence + \
'\nIron: ' + sequence
return sequence | Iron out' indel regions in the aligned sequence.
Any inserts next to deletions are converted to matches (uppercase).
Given a CMA string like:
AAAAbc--de-f--gAAA
Result:
AAAABCDEFgAAA | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L498-L601 | null | #!/usr/bin/env python
"""Lower-level functionality for parsing CMA (.cma) files.
This handles .chn (CHAIN) files as a collection of CMA blocks, i.e. alignments.
"""
import itertools
import logging
import re
from . import sugar
def parse(infile):
with sugar.maybe_open(infile) as instream:
for block in _parse_blocks(instream):
yield block
read = sugar.make_reader(parse)
# Removed
# parse_raw = parse
# --------------------------------------------------------------------
# Parse the cma into raw chunks
# Flow:
# if START:
# parse line 1
# parse line 2
# pass control to _parse_sequences
#
# _parse_sequences:
# read a non-blank line;
# if END: return (w/ all sequences accumulated)
# else:
# read 2 more non-blank lines
# parse the 3 read lines & bundle up data
# yield a seq
#
def _parse_blocks(instream):
"""Parse an alignment block from the given file handle.
Block looks like:
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
(209)***********************************************...
... sequences, numbered 1-8 ...
_0].
"""
ilines = sugar.unblank(instream)
for line in ilines:
if line.startswith('['):
# Start of block
level, one, name, seqcount, params = _parse_block_header(line)
qlen, qchars = _parse_block_postheader(next(ilines))
# Pass control to the sequence parser
sequences = list(_parse_sequences(ilines, qlen))
# Validation
if not len(sequences) == seqcount:
logging.warn("Expected %d sequences in block %s, found %d",
seqcount, name, len(sequences))
yield {'level': level,
'one': one,
'name': name,
# 'seqcount': seqcount,
'params': params,
'query_length': qlen,
'query_chars': qchars,
'sequences': sequences,
}
def _parse_sequences(ilines, expect_qlen):
"""Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
"""
while True:
first = next(ilines)
if first.startswith('_') and first.endswith('].'):
# End of sequences & end of block
break
# ENH: handle wrapped lines?
try:
index, this_len, query_len = _parse_seq_preheader(first)
except ValueError:
logging.warn('Unparseable line (SKIPPING):\n%s', first)
continue
(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
) = _parse_seq_header(next(ilines))
try:
headseq, molseq, tailseq = _parse_seq_body(next(ilines))
except ValueError:
logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
continue
# Validation
if expect_qlen != query_len:
logging.warn("Query length in %s given as %d; expected %d",
rec_id, query_len, expect_qlen)
if not headseq and not headlen:
headlen = 0
if not tailseq and not taillen:
taillen = 0
if headseq:
if headlen is None:
headlen = len(headseq)
elif headlen != len(headseq):
logging.warn("Conflicting head flank lengths in %s: %d, %d",
rec_id, headlen, len(headseq))
if tailseq:
if taillen is None:
taillen = len(tailseq)
elif taillen != len(tailseq):
logging.warn("Conflicting tail flank lengths in %s: %d, %d",
rec_id, taillen, len(tailseq))
yield {'index': index,
'id': rec_id,
'description': description,
'dbxrefs': dbxrefs,
'phylum': phylum,
'taxchar': taxchar,
'head_len': headlen,
'tail_len': taillen,
'head_seq': headseq,
'tail_seq': tailseq,
'length': this_len,
'seq': molseq,
}
# Microparsing
def _parse_block_header(line):
"""
[0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}:
"""
level = line[1]
one, _rest = line[4:].split(')=', 1)
name, _rest = _rest.split('(', 1)
seqcount, _rest = _rest.split(')', 1)
params = _rest.strip('{}:')
# try:
# params = dict((key, float(val))
# for key, val in (pair.split('=')
# for pair in _rest[1:-2].split(',')))
# except ValueError:
# # Couldn't convert params to key-val pairs, for whatever reason
# logging.warn("Failed to parse CMA params: %s", _rest[1:-2])
# params = {}
return int(level), int(one), name, int(seqcount), params
def _parse_block_postheader(line):
"""
(209)**************!*****************!!*************...
"""
parts = line[1:].split(')', 1)
qlen = int(parts[0])
if not len(parts[1]) == qlen:
logging.warn("postheader expected %d-long query, found %d",
qlen, len(parts[1]))
return qlen, parts[1]
def _parse_seq_preheader(line):
"""
$3=227(209):
"""
match = re.match(r"\$ (\d+) = (\d+) \( (\d+) \):", line, re.VERBOSE)
if not match:
raise ValueError("Unparseable header: " + line)
index, this_len, query_len = match.groups()
return map(int, (index, this_len, query_len))
def _parse_seq_header(line):
"""Unique ID, head/tail lengths and taxonomy info from a sequence header.
The description is the part of the FASTA/CMA sequence header starting after
the first space (i.e. excluding ID), to the end of the line.
This function looks inside the first '{...}' pair to extract info.
Ex:
>consensus seq
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
>gi|3212262|pdb|1A2K|C {<Chordata(M)>}Chain C, Gdpran-Ntf2 Complex >gi|3212263|pdb|1A2K|D Chain D, Gdpran-Ntf2 Complex >gi|3212264|pdb|1A2K|E Chain E, Gdpran-Ntf2 Complex >gi|5542273|pdb|1IBR|A C
"""
# ENH: use the two functions in esbglib.parseutils
# or, move one or both of those functions into here
_parts = line[1:].split(None, 1)
rec_id = _parts[0]
descr = _parts[1] if _parts[1:] else ''
# Database cross references
dbxrefs = {}
if '|' in rec_id:
id_gen = iter(rec_id.rstrip('|').split('|'))
for key in id_gen:
try:
dbxrefs[key] = next(id_gen)
except StopIteration:
break
# Head/tail lengths and taxonomy codes
headlen = taillen = None
phylum = taxchar = ''
if descr.startswith('{'):
_deets, description = descr[1:].split('}', 1)
match = re.search(r"""
(?:
\| (?P<headlen> \d+)
\( (?P<taillen> \d+)
\)
\|
)?
(?:
< (?P<phylum> .+?)
\( (?P<taxchar> \w)
\)
>
)?
""", _deets, re.VERBOSE)
if match:
headlen, taillen, phylum, taxchar = match.groups()
if headlen is not None:
headlen = int(headlen)
if taillen is not None:
taillen = int(taillen)
if phylum is None:
phylum = ''
if taxchar is None:
taxchar = ''
else:
logging.warn("Couldn't match head/tail: %s", _deets)
else:
description = descr
# TODO - return a dictionary here, update it in _parse_sequences
return rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
def _parse_seq_body(line):
"""
Ex:
{()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
Or:
{(HY)ELPWVEKYR...
The sequence fragments in parentheses represent N- or C-terminal flanking
regions that are not part of the alignment block (I think). Most tools don't
include them, but some do, apparently.
"""
line = line.rstrip('*')
if '{()' in line:
head, _rest = line.split('{()', 1)
else:
# Match parens
_rest = line.split('{(', 1)[1]
head, _rest = _rest.split(')', 1)
if '()}' in _rest:
molseq, tail = _rest.split('()}', 1)
else:
# Match parens
molseq, _rest = _rest.split('(', 1)
tail = _rest.split(')}', 1)[0]
return (head, molseq, tail)
# --------------------------------------------------------------------
# Write
def write(blocks, outfile):
if isinstance(blocks, dict) and 'sequences' in blocks:
blocks = [blocks]
with sugar.maybe_open(outfile, 'w+') as outstream:
outstream.writelines(
itertools.chain(*map(_format_block, blocks)))
def _format_block(block):
# Block header
# [0_(1)=structs.seq(12){go=19,gx=2,pn=5.0,lf=0,rf=0}:
# (69)*****!***************************!!!******!******************!*******
yield """\
[{level}_({one})={name}({seqcount}){{{params}}}:
({query_length}){query_chars}
""".format(
seqcount=len(block['sequences']),
# fixed_params=','.join('{0}={1}'.format(key, val)
# for key, val in block['params'].iteritems()),
**block)
# Sequences
for s in _format_sequences(block['sequences'], block['query_length']):
yield s
# Block close
yield "_%s].\n" % block['level']
def _format_sequences(sequences, query_length):
# $1=254(255):
# >2QG5|A {|27(6)|}
# {()YTLENTIGRGSWGEVKIAVQKGTRIRRAAKKIPKYFV---EDVDRFKQEIEIMKSLDHPNIIRLYETFEDNTDIYLVMELCTGGELFERVVHKRVFRESDAARIMKDVLSAVAYCHKLNVAHRDLKPENFLFltdSPDSPLKLIDFGLAARFkpGKMMRTKVGTPYYVSPQVLEGL-YGPECDEWSAGVMMYVLLCGYPPFSAPTDXEVMLKIREGTFtfpeKDWLNVSPQAESLIRRLLTKSPKQRIT-----SLQALEHEW-()}*
for idx, seq in enumerate(sequences):
head_tail = ("|{head_len}({tail_len})|".format(**seq)
if seq['head_len'] or seq['tail_len'] else "")
taxonomy=("<{phylum}({taxchar})>".format(**seq)
if seq['phylum'] and seq['taxchar'] else "")
special_header = ("{{{0}{1}}}".format(head_tail, taxonomy)
if head_tail or taxonomy else "")
seq['index'] = idx + 1
yield """\
${index}={length}({0}):
>{id} {1}{description}
{head_seq}{{(){seq}()}}{tail_seq}*
""".format(query_length, special_header, **seq)
# --------------------------------------------------------------------
# Utilities
def realign_seqs(block, gap_char='.', align_indels=False):
"""Add gaps to a block so all residues in a column are equivalent.
Given a block, containing a list of "sequences" (dicts) each containing a
"seq" (actual string sequence, where upper=match, lower=insert, dash=gap),
insert gaps (- or .) into the sequences s.t.
1. columns line up properly, and
2. all resulting sequences have the same length
The reason this needs to be done is that the query/consensus sequence is not
assigned gaps to account for inserts in the other sequences. We need to add
the gaps back to obtain a normal alignment.
`return`: a list of realigned sequence strings.
"""
# ENH: align inserts using an external tool (if align_indels)
all_chars = [list(sq['seq']) for sq in block['sequences']]
# NB: If speed is an issue here, consider Numpy or Cython
# main problem: list.insert is O(n) -- would OrderedDict help?
nrows = len(all_chars)
i = 0
while i < len(all_chars[0]):
rows_need_gaps = [r for r in all_chars if not r[i].islower()]
if len(rows_need_gaps) != nrows:
for row in rows_need_gaps:
row.insert(i, gap_char)
i += 1
return [''.join(row) for row in all_chars]
def consensus2block(record, level=0, name=None):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Ungapping is handled here.
"""
cons_ungap = str(record.seq).replace('-', '').replace('.', '').upper()
record.seq = cons_ungap
return dict(
level=level, #record.annotations.get('level', 0),
one=1,
name=name or record.id,
params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
query_length=len(cons_ungap),
query_chars='*'*len(cons_ungap),
sequences=[seqrecord2sequence(record, len(cons_ungap), 1)]
)
def seqrecord2sequence(record, qlen, index):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Indels (gaps, casing) must have already been handled in the record.
"""
# aligned_len = sum(map(str.isupper, str(record.seq)))
# assert qlen == aligned_len, (
# "Aligned sequence length %s, query %s\n%s"
# % (aligned_len, qlen, str(record)))
description = (record.description.split(' ', 1)[1]
if ' ' in record.description
else ' ')
return dict(index=index,
id=record.id,
description=description,
dbxrefs={},
phylum='',
taxchar='',
head_len=None,
tail_len=None,
head_seq='',
tail_seq='',
length=len(record.seq) - record.seq.count('-'),
seq=str(record.seq),
)
def replace_asterisks(seq, label=None):
if '*' in seq:
logging.warn("Sequence %scontains '*' character; replacing with 'X'",
str(label) + ' ' if label else '')
return str(seq).replace('*', 'X')
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
"""Opposite of realign_seqs.
Input sequences should all be the same length.
The first record must be the consensus.
"""
level = 0
name = seqrecords[0].id
# If this is a CMA alignment, extract additional info:
if hasattr(seqrecords, '_records'):
if hasattr(seqrecords, 'level'):
level = seqrecords.level
if hasattr(seqrecords, 'name'):
name = seqrecords.name
seqrecords = seqrecords._records
consensus = seqrecords.pop(0)
cons_length = len(consensus)
for i, s in enumerate(seqrecords):
if len(s) != cons_length:
raise ValueError(
"Sequence #%d has length %d, consensus is %d"
% (i+2, len(s), cons_length))
if '.' in str(consensus.seq):
# Strict -- error if there's a '-'
if '-' in str(consensus.seq):
if strict:
raise ValueError("Consensus contains '-' gap characters")
logging.warn("Consensus sequence contains both '.' and '-' gap "
"characters -- is it really the consensus?")
aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
else:
aligned_cols = [c != '.' for c in str(consensus.seq)]
else:
# A little more ambiguous...
aligned_cols = [c != '-' for c in str(consensus.seq)]
consensus.seq = replace_asterisks(consensus.seq, 'consensus')
# Start a block with the consensus sequence
block = consensus2block(consensus, level=level, name=name)
qlen = block['query_length']
# Collapse & add remaining sequences to the block
for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords):
# Collapse rec.seq down to aligned size
new_mol_seq = []
is_beginning = True
for aligned_col, char in zip(aligned_cols,
replace_asterisks(rec.seq, index)):
if aligned_col:
is_beginning = False
if char in '-.':
# deletion
new_mol_seq.append('-')
else:
# aligned character
new_mol_seq.append(char.upper())
else:
# it's an insert or nothing
# (also, skip any left-side inserts)
if char not in '-.' and not is_beginning:
new_mol_seq.append(char.lower())
rec.seq = ''.join(new_mol_seq)
if do_iron:
rec.seq = iron(rec.seq)
block['sequences'].append(seqrecord2sequence(rec, qlen, index))
return block
# --------------------------------------------------------------------
if __name__ == '__main__':
# Test
import sys
from .utils import get_equivalent_positions
cmafiles = sys.argv[1:]
if not cmafiles:
sys.exit("usage: cma.py <cmafile1> [<cmafile2> ...]")
for fname in cmafiles:
block = next(parse(fname))
print block['query_length'], "aa query"
print len(block['sequences']), "sequences"
print "of lengths:",
for seq in block['sequences']:
print "%d/%d(%d)" % (
len(seq['seq']), seq['length'], seq['seq'].count('-')),
print
print " Equivalencies:"
for idx in xrange(1, 20):
print idx, '=', get_equivalent_positions(block)[idx]
print
print " Writing the file back out:"
print
if len(block['sequences']) < 60:
write(block, sys.stdout)
|
etal/biocma | biocma/utils.py | find_seq_rec | python | def find_seq_rec(block, name, case_sensitive=True):
if case_sensitive:
def test(name, rec):
return name in rec['id']
else:
def test(name, rec):
return name.upper() in rec['id'].upper()
for rec in block['sequences']:
if test(name, rec):
return rec
raise ValueError("No sequence ID matches %s" % repr(name)) | Given part of a sequence ID, find the first matching record. | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L6-L18 | [
"def test(name, rec):\n return name in rec['id']\n",
"def test(name, rec):\n return name.upper() in rec['id'].upper()\n"
] | import logging
# from .cma import parse,
def find_seq_id(block, name, case_sensitive=True):
"""Given part of a sequence ID, find the first actual ID that contains it.
Example::
>>> find_seq_id(block, '2QG5')
'gi|158430190|pdb|2QG5|A'
Raise a ValueError if no matching key is found.
"""
# logging.warn("DEPRECATED: Try to use cma.find_seq_rec instead")
rec = find_seq_rec(block, name, case_sensitive)
return rec['id']
def get_consensus(block):
"""Calculate a simple consensus sequence for the block."""
from collections import Counter
# Take aligned (non-insert) chars from all rows; transpose
columns = zip(*[[c for c in row['seq'] if not c.islower()]
for row in block['sequences']])
cons_chars = [Counter(col).most_common()[0][0] for col in columns]
cons_chars = [c if c != '-' else 'X' for c in cons_chars]
assert len(cons_chars) == block['query_length']
cons_sequence = {
'index': 1,
'id': 'consensus',
'description': '',
'dbxrefs': {},
'phylum': '',
'taxchar': '',
'head_len': None,
'tail_len': None,
'head_seq': '',
'tail_seq': '',
'length': block['query_length'],
'seq': ''.join(cons_chars),
}
return cons_sequence
def get_conservation(block):
"""Calculate conservation levels at each consensus position.
Return a dict of {position: float conservation}
"""
consensus = block['sequences'][0]['seq']
assert all(c.isupper() for c in consensus), \
"So-called consensus contains indels!"
# remove all non-consensus positions -- now alignment is easy
cleaned = [[c for c in s['seq'] if not c.islower()]
for s in block['sequences'][1:]]
height = float(len(cleaned))
# validation
for row in cleaned:
if len(row) != len(consensus):
raise ValueError("Aligned sequence length (%s) doesn't match "
"consensus (%s)"
% (len(row), len(consensus)))
# transpose & go
columns = zip(*cleaned)
return dict((idx + 1, columns[idx].count(cons_char) / height)
for idx, cons_char in enumerate(consensus))
def get_equivalent_positions(block):
"""Create a mapping of equivalent residue positions to consensus.
Build a dict-of-dicts::
{consensus-posn: {id: equiv-posn, id: equiv-posn, ...}, ...}
The first sequence in the alignment is assumed to be the (gapless) consensus
sequence.
"""
consensus = block['sequences'][0]['seq']
rest = block['sequences'][1:]
# Validation
if '-' in consensus or '.' in consensus:
raise ValueError("First sequence (consensus?) contains gaps")
# Check for duplicate sequence IDs
seen = set()
dupes = set()
for rec in rest:
if rec['id'] in seen:
dupes.add(rec['id'])
else:
seen.add(rec['id'])
if dupes:
raise ValueError("Duplicate sequences:\n" + '\n'.join(dupes))
curr_shift = {}
curr_resn = {}
# NB: consensus doesn't have head/tail, but other sequences may
for rec in rest:
# Count inserts seen so far -- shift string indexes by this far ahead to
# get the "equivalent" location in the sequence string
# - as in, how far ahead in the current seq do we need to jump to get
# to a position equivalent to what's in the consensus?
# - can this ever be less than 0 (==consensus)? No, because that's
# where gaps come from. Good.
curr_shift[rec['id']] = 0
# Residue number in the actual sequence at the current (shifted)
# location
# curr_posn[id] = current equivalent res.num in `id` to cons[i]
curr_resn[rec['id']] = rec['head_len']
equivalencies = dict((i+1, {}) for i in xrange(len(consensus)))
# Map each character position i in the consensus sequence
# to equivalent residues in each of the other sequences
# i = index in the consensus string (== consensus res.num - 1)
for i, char in enumerate(consensus):
assert char.isupper()
for rec in rest:
rid = rec['id']
strposn = i + curr_shift[rid]
if rec['seq'][strposn].isupper():
# Match
curr_resn[rid] += 1
elif rec['seq'][strposn].islower():
# Insert
while rec['seq'][strposn].islower():
# Count the whole insert size
curr_shift[rid] += 1
curr_resn[rid] += 1
strposn += 1
curr_resn[rid] += 1 # Count the next match, too
else:
# Deletion / gap
assert rec['seq'][strposn] in '.-'
continue
equivalencies[i+1][rid] = curr_resn[rid]
return equivalencies
def get_inserts(block):
    """Identify the inserts in sequence in a block.

    Inserts are relative to the consensus (theoretically), and identified by
    lowercase letters in the sequence. The returned integer pairs represent the
    insert start and end positions in the full-length sequence, using one-based
    numbering.

    The first sequence of the CMA block is included, though it may just be the
    consensus sequence, which shouldn't have any inserts.

    Output:
        {id1: [(start, end), (start, end), ...], id2: ..., ...}
    """
    def find_inserts(seq, head_len):
        """Locate the lowercase regions in a character sequence.

        Yield the insert ranges as tuples using 1-based numbering, shifted by
        head_len.
        """
        in_insert = False
        curr_start = None
        deletions = 0
        for idx, is_lower in enumerate(map(str.islower, seq)):
            if is_lower:
                if not in_insert:
                    # Start of a new insert region
                    curr_start = head_len + idx + 1 - deletions
                    in_insert = True
            else:
                if in_insert:
                    # End of the current insert region
                    yield (curr_start, head_len + idx - deletions)
                    in_insert = False
                if seq[idx] == '-':
                    # Gap characters shift residue numbering left
                    deletions += 1
        if in_insert:
            # BUG FIX: an insert running to the very end of the sequence was
            # previously dropped; flush the still-open region after the loop.
            yield (curr_start, head_len + len(seq) - deletions)

    return dict((record['id'],
                 list(find_inserts(record['seq'], record['head_len'])))
                for record in block['sequences'])
def number_letters(block_or_record, key=None):
    """Map residue numbers to residue types for one sequence.

    Returns a dict {position: letter} covering every letter of the
    ungapped sequence, numbered starting at head_len + 1.
    """
    if key:
        # Legacy calling convention: a whole block plus a sequence key.
        logging.warn("DEPRECATED: Pass a record instead")
        assert 'sequences' in block_or_record, "Expected a block and a key"
        rec = find_seq_rec(block_or_record, key)
    else:
        assert 'id' in block_or_record, "Expected argument to be a record"
        rec = block_or_record
    # Strip gap characters so positions count real residues only.
    letters = rec['seq'].replace('-', '').replace('.', '')
    # The first residue is numbered just past the unaligned head region.
    offset = rec['head_len'] + 1
    return {pos + offset: letter for pos, letter in enumerate(letters)}
|
etal/biocma | biocma/utils.py | find_seq_id | python | def find_seq_id(block, name, case_sensitive=True):
# logging.warn("DEPRECATED: Try to use cma.find_seq_rec instead")
rec = find_seq_rec(block, name, case_sensitive)
return rec['id'] | Given part of a sequence ID, find the first actual ID that contains it.
Example::
>>> find_seq_id(block, '2QG5')
'gi|158430190|pdb|2QG5|A'
Raise a ValueError if no matching key is found. | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L21-L33 | [
"def find_seq_rec(block, name, case_sensitive=True):\n \"\"\"Given part of a sequence ID, find the first matching record.\"\"\"\n if case_sensitive:\n def test(name, rec):\n return name in rec['id']\n else:\n def test(name, rec):\n return name.upper() in rec['id'].upper()\n\n for rec in block['sequences']:\n if test(name, rec):\n return rec\n raise ValueError(\"No sequence ID matches %s\" % repr(name))\n"
] | import logging
# from .cma import parse,
def find_seq_rec(block, name, case_sensitive=True):
    """Return the first sequence record whose ID contains `name`.

    Scans block['sequences'] in order; `case_sensitive=False` compares
    both strings upper-cased.  Raises ValueError when nothing matches.
    """
    if not case_sensitive:
        name = name.upper()
    for record in block['sequences']:
        rec_id = record['id'] if case_sensitive else record['id'].upper()
        if name in rec_id:
            return record
    raise ValueError("No sequence ID matches %s" % repr(name))
def get_consensus(block):
    """Calculate a simple consensus sequence for the block."""
    from collections import Counter
    # Keep only the aligned (non-insert, i.e. non-lowercase) characters of
    # every row, then walk the alignment column by column.
    aligned_rows = [[ch for ch in record['seq'] if not ch.islower()]
                    for record in block['sequences']]
    consensus_chars = []
    for column in zip(*aligned_rows):
        # Majority vote per column; a winning gap character becomes 'X'.
        winner = Counter(column).most_common()[0][0]
        consensus_chars.append(winner if winner != '-' else 'X')
    assert len(consensus_chars) == block['query_length']
    return {
        'index': 1,
        'id': 'consensus',
        'description': '',
        'dbxrefs': {},
        'phylum': '',
        'taxchar': '',
        'head_len': None,
        'tail_len': None,
        'head_seq': '',
        'tail_seq': '',
        'length': block['query_length'],
        'seq': ''.join(consensus_chars),
    }
def get_conservation(block):
    """Calculate conservation levels at each consensus position.

    The first sequence in the block is taken as the (gapless) consensus;
    conservation at a position is the fraction of the remaining sequences
    whose aligned character equals the consensus character there.

    Return a dict of {position: float conservation}, positions 1-based.

    Raises ValueError if any aligned row does not match the consensus length.
    """
    consensus = block['sequences'][0]['seq']
    assert all(c.isupper() for c in consensus), \
            "So-called consensus contains indels!"
    # remove all non-consensus (insert) positions -- now alignment is easy
    cleaned = [[c for c in s['seq'] if not c.islower()]
               for s in block['sequences'][1:]]
    height = float(len(cleaned))
    # validation
    for row in cleaned:
        if len(row) != len(consensus):
            raise ValueError("Aligned sequence length (%s) doesn't match "
                             "consensus (%s)"
                             % (len(row), len(consensus)))
    # Transpose to columns.  BUG FIX: on Python 3 zip() returns a lazy
    # iterator, which is not subscriptable (the loop below indexes it);
    # materializing with list() works identically on Python 2 and 3.
    columns = list(zip(*cleaned))
    return dict((idx + 1, columns[idx].count(cons_char) / height)
                for idx, cons_char in enumerate(consensus))
def get_equivalent_positions(block):
    """Create a mapping of equivalent residue positions to consensus.

    Build a dict-of-dicts::

        {consensus-posn: {id: equiv-posn, id: equiv-posn, ...}, ...}

    The first sequence in the alignment is assumed to be the (gapless)
    consensus sequence.  Positions deleted in a sequence (gap characters)
    are simply absent from that consensus position's inner dict.

    Raises ValueError if the consensus contains gaps or sequence IDs repeat.
    """
    consensus = block['sequences'][0]['seq']
    rest = block['sequences'][1:]
    # Validation
    if '-' in consensus or '.' in consensus:
        raise ValueError("First sequence (consensus?) contains gaps")
    # Check for duplicate sequence IDs -- duplicates would silently
    # overwrite each other in the inner dicts built below
    seen = set()
    dupes = set()
    for rec in rest:
        if rec['id'] in seen:
            dupes.add(rec['id'])
        else:
            seen.add(rec['id'])
    if dupes:
        raise ValueError("Duplicate sequences:\n" + '\n'.join(dupes))

    curr_shift = {}
    curr_resn = {}
    # NB: consensus doesn't have head/tail, but other sequences may
    for rec in rest:
        # Count inserts seen so far -- shift string indexes by this far
        # ahead to get the "equivalent" location in the sequence string.
        # Never negative: that's where gaps come from.
        curr_shift[rec['id']] = 0
        # Residue number in the actual sequence at the current (shifted)
        # location; starts just after the unaligned head region
        curr_resn[rec['id']] = rec['head_len']
    # BUG FIX: xrange is Python 2 only (NameError on Python 3); range
    # behaves identically here on both versions.
    equivalencies = dict((i+1, {}) for i in range(len(consensus)))

    # Map each character position i in the consensus sequence
    # to equivalent residues in each of the other sequences
    # i = index in the consensus string (== consensus res.num - 1)
    for i, char in enumerate(consensus):
        assert char.isupper()
        for rec in rest:
            rid = rec['id']
            strposn = i + curr_shift[rid]
            if rec['seq'][strposn].isupper():
                # Match
                curr_resn[rid] += 1
            elif rec['seq'][strposn].islower():
                # Insert: skip ahead over the whole lowercase run
                while rec['seq'][strposn].islower():
                    curr_shift[rid] += 1
                    curr_resn[rid] += 1
                    strposn += 1
                curr_resn[rid] += 1  # Count the next match, too
            else:
                # Deletion / gap -- no equivalent residue at this position
                assert rec['seq'][strposn] in '.-'
                continue
            equivalencies[i+1][rid] = curr_resn[rid]
    return equivalencies
def get_inserts(block):
    """Identify the inserts in sequence in a block.

    Inserts are relative to the consensus (theoretically), and identified by
    lowercase letters in the sequence. The returned integer pairs represent the
    insert start and end positions in the full-length sequence, using one-based
    numbering.

    The first sequence of the CMA block is included, though it may just be the
    consensus sequence, which shouldn't have any inserts.

    Output:
        {id1: [(start, end), (start, end), ...], id2: ..., ...}
    """
    def find_inserts(seq, head_len):
        """Locate the lowercase regions in a character sequence.

        Yield the insert ranges as tuples using 1-based numbering, shifted by
        head_len.
        """
        in_insert = False
        curr_start = None
        deletions = 0
        for idx, is_lower in enumerate(map(str.islower, seq)):
            if is_lower:
                if not in_insert:
                    # Start of a new insert region
                    curr_start = head_len + idx + 1 - deletions
                    in_insert = True
            else:
                if in_insert:
                    # End of the current insert region
                    yield (curr_start, head_len + idx - deletions)
                    in_insert = False
                if seq[idx] == '-':
                    # Gap characters shift residue numbering left
                    deletions += 1
        if in_insert:
            # BUG FIX: an insert running to the very end of the sequence was
            # previously dropped; flush the still-open region after the loop.
            yield (curr_start, head_len + len(seq) - deletions)

    return dict((record['id'],
                 list(find_inserts(record['seq'], record['head_len'])))
                for record in block['sequences'])
def number_letters(block_or_record, key=None):
    """Return a dict of {posn: restype} for each letter in the sequence.

    Accepts either a single sequence record (preferred) or, for backward
    compatibility, a whole block plus ``key`` identifying the record.
    Positions are residue numbers starting at the record's head_len + 1;
    gap characters ('-' and '.') are removed before numbering.
    """
    if key:
        logging.warn("DEPRECATED: Pass a record instead")
        assert 'sequences' in block_or_record, "Expected a block and a key"
        record = find_seq_rec(block_or_record, key)
    else:
        assert 'id' in block_or_record, "Expected argument to be a record"
        record = block_or_record
    # Ungapped sequence -- accounts for dels vs. consensus
    seq = record['seq'].replace('-', '').replace('.', '')
    # Difference between string positions and residue numbers
    shift = record['head_len'] + 1
    return dict((idx + shift, letter)
                for idx, letter in enumerate(seq))
|
etal/biocma | biocma/utils.py | get_consensus | python | def get_consensus(block):
from collections import Counter
# Take aligned (non-insert) chars from all rows; transpose
columns = zip(*[[c for c in row['seq'] if not c.islower()]
for row in block['sequences']])
cons_chars = [Counter(col).most_common()[0][0] for col in columns]
cons_chars = [c if c != '-' else 'X' for c in cons_chars]
assert len(cons_chars) == block['query_length']
cons_sequence = {
'index': 1,
'id': 'consensus',
'description': '',
'dbxrefs': {},
'phylum': '',
'taxchar': '',
'head_len': None,
'tail_len': None,
'head_seq': '',
'tail_seq': '',
'length': block['query_length'],
'seq': ''.join(cons_chars),
}
return cons_sequence | Calculate a simple consensus sequence for the block. | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L36-L60 | null | import logging
# from .cma import parse,
def find_seq_rec(block, name, case_sensitive=True):
    """Given part of a sequence ID, find the first matching record.

    Scans block['sequences'] in order and returns the first record whose
    'id' contains ``name``; with case_sensitive=False both strings are
    compared upper-cased.  Raises ValueError when nothing matches.
    """
    if case_sensitive:
        def test(name, rec):
            return name in rec['id']
    else:
        def test(name, rec):
            return name.upper() in rec['id'].upper()
    for rec in block['sequences']:
        if test(name, rec):
            return rec
    raise ValueError("No sequence ID matches %s" % repr(name))
def find_seq_id(block, name, case_sensitive=True):
    """Given part of a sequence ID, find the first actual ID that contains it.

    Example::

        >>> find_seq_id(block, '2QG5')
        'gi|158430190|pdb|2QG5|A'

    Raise a ValueError if no matching key is found.
    """
    # logging.warn("DEPRECATED: Try to use cma.find_seq_rec instead")
    return find_seq_rec(block, name, case_sensitive)['id']
def get_conservation(block):
    """Calculate conservation levels at each consensus position.

    The first sequence in the block is taken as the (gapless) consensus;
    conservation at a position is the fraction of the remaining sequences
    whose aligned character equals the consensus character there.

    Return a dict of {position: float conservation}, positions 1-based.

    Raises ValueError if any aligned row does not match the consensus length.
    """
    consensus = block['sequences'][0]['seq']
    assert all(c.isupper() for c in consensus), \
            "So-called consensus contains indels!"
    # remove all non-consensus (insert) positions -- now alignment is easy
    cleaned = [[c for c in s['seq'] if not c.islower()]
               for s in block['sequences'][1:]]
    height = float(len(cleaned))
    # validation
    for row in cleaned:
        if len(row) != len(consensus):
            raise ValueError("Aligned sequence length (%s) doesn't match "
                             "consensus (%s)"
                             % (len(row), len(consensus)))
    # Transpose to columns.  BUG FIX: on Python 3 zip() returns a lazy
    # iterator, which is not subscriptable (the loop below indexes it);
    # materializing with list() works identically on Python 2 and 3.
    columns = list(zip(*cleaned))
    return dict((idx + 1, columns[idx].count(cons_char) / height)
                for idx, cons_char in enumerate(consensus))
def get_equivalent_positions(block):
    """Create a mapping of equivalent residue positions to consensus.

    Build a dict-of-dicts::

        {consensus-posn: {id: equiv-posn, id: equiv-posn, ...}, ...}

    The first sequence in the alignment is assumed to be the (gapless)
    consensus sequence.  Positions deleted in a sequence (gap characters)
    are simply absent from that consensus position's inner dict.

    Raises ValueError if the consensus contains gaps or sequence IDs repeat.
    """
    consensus = block['sequences'][0]['seq']
    rest = block['sequences'][1:]
    # Validation
    if '-' in consensus or '.' in consensus:
        raise ValueError("First sequence (consensus?) contains gaps")
    # Check for duplicate sequence IDs -- duplicates would silently
    # overwrite each other in the inner dicts built below
    seen = set()
    dupes = set()
    for rec in rest:
        if rec['id'] in seen:
            dupes.add(rec['id'])
        else:
            seen.add(rec['id'])
    if dupes:
        raise ValueError("Duplicate sequences:\n" + '\n'.join(dupes))

    curr_shift = {}
    curr_resn = {}
    # NB: consensus doesn't have head/tail, but other sequences may
    for rec in rest:
        # Count inserts seen so far -- shift string indexes by this far
        # ahead to get the "equivalent" location in the sequence string.
        # Never negative: that's where gaps come from.
        curr_shift[rec['id']] = 0
        # Residue number in the actual sequence at the current (shifted)
        # location; starts just after the unaligned head region
        curr_resn[rec['id']] = rec['head_len']
    # BUG FIX: xrange is Python 2 only (NameError on Python 3); range
    # behaves identically here on both versions.
    equivalencies = dict((i+1, {}) for i in range(len(consensus)))

    # Map each character position i in the consensus sequence
    # to equivalent residues in each of the other sequences
    # i = index in the consensus string (== consensus res.num - 1)
    for i, char in enumerate(consensus):
        assert char.isupper()
        for rec in rest:
            rid = rec['id']
            strposn = i + curr_shift[rid]
            if rec['seq'][strposn].isupper():
                # Match
                curr_resn[rid] += 1
            elif rec['seq'][strposn].islower():
                # Insert: skip ahead over the whole lowercase run
                while rec['seq'][strposn].islower():
                    curr_shift[rid] += 1
                    curr_resn[rid] += 1
                    strposn += 1
                curr_resn[rid] += 1  # Count the next match, too
            else:
                # Deletion / gap -- no equivalent residue at this position
                assert rec['seq'][strposn] in '.-'
                continue
            equivalencies[i+1][rid] = curr_resn[rid]
    return equivalencies
def get_inserts(block):
    """Identify the inserts in sequence in a block.

    Inserts are relative to the consensus (theoretically), and identified by
    lowercase letters in the sequence. The returned integer pairs represent the
    insert start and end positions in the full-length sequence, using one-based
    numbering.

    The first sequence of the CMA block is included, though it may just be the
    consensus sequence, which shouldn't have any inserts.

    Output:
        {id1: [(start, end), (start, end), ...], id2: ..., ...}
    """
    def find_inserts(seq, head_len):
        """Locate the lowercase regions in a character sequence.

        Yield the insert ranges as tuples using 1-based numbering, shifted by
        head_len.
        """
        in_insert = False
        curr_start = None
        deletions = 0
        for idx, is_lower in enumerate(map(str.islower, seq)):
            if is_lower:
                if not in_insert:
                    # Start of a new insert region
                    curr_start = head_len + idx + 1 - deletions
                    in_insert = True
            else:
                if in_insert:
                    # End of the current insert region
                    yield (curr_start, head_len + idx - deletions)
                    in_insert = False
                if seq[idx] == '-':
                    # Gap characters shift residue numbering left
                    deletions += 1
        if in_insert:
            # BUG FIX: an insert running to the very end of the sequence was
            # previously dropped; flush the still-open region after the loop.
            yield (curr_start, head_len + len(seq) - deletions)

    return dict((record['id'],
                 list(find_inserts(record['seq'], record['head_len'])))
                for record in block['sequences'])
def number_letters(block_or_record, key=None):
    """Return a dict of {posn: restype} for each letter in the sequence.

    Accepts either a single sequence record (preferred) or, for backward
    compatibility, a whole block plus ``key`` identifying the record.
    Positions are residue numbers starting at the record's head_len + 1;
    gap characters ('-' and '.') are removed before numbering.
    """
    if key:
        logging.warn("DEPRECATED: Pass a record instead")
        assert 'sequences' in block_or_record, "Expected a block and a key"
        record = find_seq_rec(block_or_record, key)
    else:
        assert 'id' in block_or_record, "Expected argument to be a record"
        record = block_or_record
    # Ungapped sequence -- accounts for dels vs. consensus
    seq = record['seq'].replace('-', '').replace('.', '')
    # Difference between string positions and residue numbers
    shift = record['head_len'] + 1
    return dict((idx + shift, letter)
                for idx, letter in enumerate(seq))
|
etal/biocma | biocma/utils.py | get_conservation | python | def get_conservation(block):
consensus = block['sequences'][0]['seq']
assert all(c.isupper() for c in consensus), \
"So-called consensus contains indels!"
# remove all non-consensus positions -- now alignment is easy
cleaned = [[c for c in s['seq'] if not c.islower()]
for s in block['sequences'][1:]]
height = float(len(cleaned))
# validation
for row in cleaned:
if len(row) != len(consensus):
raise ValueError("Aligned sequence length (%s) doesn't match "
"consensus (%s)"
% (len(row), len(consensus)))
# transpose & go
columns = zip(*cleaned)
return dict((idx + 1, columns[idx].count(cons_char) / height)
for idx, cons_char in enumerate(consensus)) | Calculate conservation levels at each consensus position.
Return a dict of {position: float conservation} | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L63-L84 | null | import logging
# from .cma import parse,
def find_seq_rec(block, name, case_sensitive=True):
"""Given part of a sequence ID, find the first matching record."""
if case_sensitive:
def test(name, rec):
return name in rec['id']
else:
def test(name, rec):
return name.upper() in rec['id'].upper()
for rec in block['sequences']:
if test(name, rec):
return rec
raise ValueError("No sequence ID matches %s" % repr(name))
def find_seq_id(block, name, case_sensitive=True):
"""Given part of a sequence ID, find the first actual ID that contains it.
Example::
>>> find_seq_id(block, '2QG5')
'gi|158430190|pdb|2QG5|A'
Raise a ValueError if no matching key is found.
"""
# logging.warn("DEPRECATED: Try to use cma.find_seq_rec instead")
rec = find_seq_rec(block, name, case_sensitive)
return rec['id']
def get_consensus(block):
"""Calculate a simple consensus sequence for the block."""
from collections import Counter
# Take aligned (non-insert) chars from all rows; transpose
columns = zip(*[[c for c in row['seq'] if not c.islower()]
for row in block['sequences']])
cons_chars = [Counter(col).most_common()[0][0] for col in columns]
cons_chars = [c if c != '-' else 'X' for c in cons_chars]
assert len(cons_chars) == block['query_length']
cons_sequence = {
'index': 1,
'id': 'consensus',
'description': '',
'dbxrefs': {},
'phylum': '',
'taxchar': '',
'head_len': None,
'tail_len': None,
'head_seq': '',
'tail_seq': '',
'length': block['query_length'],
'seq': ''.join(cons_chars),
}
return cons_sequence
def get_equivalent_positions(block):
"""Create a mapping of equivalent residue positions to consensus.
Build a dict-of-dicts::
{consensus-posn: {id: equiv-posn, id: equiv-posn, ...}, ...}
The first sequence in the alignment is assumed to be the (gapless) consensus
sequence.
"""
consensus = block['sequences'][0]['seq']
rest = block['sequences'][1:]
# Validation
if '-' in consensus or '.' in consensus:
raise ValueError("First sequence (consensus?) contains gaps")
# Check for duplicate sequence IDs
seen = set()
dupes = set()
for rec in rest:
if rec['id'] in seen:
dupes.add(rec['id'])
else:
seen.add(rec['id'])
if dupes:
raise ValueError("Duplicate sequences:\n" + '\n'.join(dupes))
curr_shift = {}
curr_resn = {}
# NB: consensus doesn't have head/tail, but other sequences may
for rec in rest:
# Count inserts seen so far -- shift string indexes by this far ahead to
# get the "equivalent" location in the sequence string
# - as in, how far ahead in the current seq do we need to jump to get
# to a position equivalent to what's in the consensus?
# - can this ever be less than 0 (==consensus)? No, because that's
# where gaps come from. Good.
curr_shift[rec['id']] = 0
# Residue number in the actual sequence at the current (shifted)
# location
# curr_posn[id] = current equivalent res.num in `id` to cons[i]
curr_resn[rec['id']] = rec['head_len']
equivalencies = dict((i+1, {}) for i in xrange(len(consensus)))
# Map each character position i in the consensus sequence
# to equivalent residues in each of the other sequences
# i = index in the consensus string (== consensus res.num - 1)
for i, char in enumerate(consensus):
assert char.isupper()
for rec in rest:
rid = rec['id']
strposn = i + curr_shift[rid]
if rec['seq'][strposn].isupper():
# Match
curr_resn[rid] += 1
elif rec['seq'][strposn].islower():
# Insert
while rec['seq'][strposn].islower():
# Count the whole insert size
curr_shift[rid] += 1
curr_resn[rid] += 1
strposn += 1
curr_resn[rid] += 1 # Count the next match, too
else:
# Deletion / gap
assert rec['seq'][strposn] in '.-'
continue
equivalencies[i+1][rid] = curr_resn[rid]
return equivalencies
def get_inserts(block):
"""Identify the inserts in sequence in a block.
Inserts are relative to the consensus (theoretically), and identified by
lowercase letters in the sequence. The returned integer pairs represent the
insert start and end positions in the full-length sequence, using one-based
numbering.
The first sequence of the CMA block is included, though it may just be the
consensus sequence, which shouldn't have any inserts.
Output:
{id1: [(start, end), (start, end), ...], id2: ..., ...}
"""
def find_inserts(seq, head_len):
"""Locate the lowercase regions in a character sequence.
Yield the insert ranges as tuples using 1-based numbering, shifted by
head_len.
"""
in_insert = False
curr_start = None
deletions = 0
for idx, is_lower in enumerate(map(str.islower, seq)):
if is_lower:
if not in_insert:
# Start of a new insert region
curr_start = head_len + idx + 1 - deletions
in_insert = True
else:
if in_insert:
# End of the current insert region
yield (curr_start, head_len + idx - deletions)
in_insert = False
if seq[idx] == '-':
deletions += 1
return dict((record['id'],
list(find_inserts(record['seq'], record['head_len'])))
for record in block['sequences'])
def number_letters(block_or_record, key=None):
"""Return a dict of {posn: restype} for each letter in the sequence."""
if key:
logging.warn("DEPRECATED: Pass a record instead")
assert 'sequences' in block_or_record, "Expected a block and a key"
record = find_seq_rec(block_or_record, key)
else:
assert 'id' in block_or_record, "Expected argument to be a record"
record = block_or_record
# Ungapped sequence -- accounts for dels vs. consensus
seq = record['seq'].replace('-', '').replace('.', '')
# Difference between string positions and residue numbers
shift = record['head_len'] + 1
return dict((idx + shift, letter)
for idx, letter in enumerate(seq))
|
etal/biocma | biocma/utils.py | get_equivalent_positions | python | def get_equivalent_positions(block):
consensus = block['sequences'][0]['seq']
rest = block['sequences'][1:]
# Validation
if '-' in consensus or '.' in consensus:
raise ValueError("First sequence (consensus?) contains gaps")
# Check for duplicate sequence IDs
seen = set()
dupes = set()
for rec in rest:
if rec['id'] in seen:
dupes.add(rec['id'])
else:
seen.add(rec['id'])
if dupes:
raise ValueError("Duplicate sequences:\n" + '\n'.join(dupes))
curr_shift = {}
curr_resn = {}
# NB: consensus doesn't have head/tail, but other sequences may
for rec in rest:
# Count inserts seen so far -- shift string indexes by this far ahead to
# get the "equivalent" location in the sequence string
# - as in, how far ahead in the current seq do we need to jump to get
# to a position equivalent to what's in the consensus?
# - can this ever be less than 0 (==consensus)? No, because that's
# where gaps come from. Good.
curr_shift[rec['id']] = 0
# Residue number in the actual sequence at the current (shifted)
# location
# curr_posn[id] = current equivalent res.num in `id` to cons[i]
curr_resn[rec['id']] = rec['head_len']
equivalencies = dict((i+1, {}) for i in xrange(len(consensus)))
# Map each character position i in the consensus sequence
# to equivalent residues in each of the other sequences
# i = index in the consensus string (== consensus res.num - 1)
for i, char in enumerate(consensus):
assert char.isupper()
for rec in rest:
rid = rec['id']
strposn = i + curr_shift[rid]
if rec['seq'][strposn].isupper():
# Match
curr_resn[rid] += 1
elif rec['seq'][strposn].islower():
# Insert
while rec['seq'][strposn].islower():
# Count the whole insert size
curr_shift[rid] += 1
curr_resn[rid] += 1
strposn += 1
curr_resn[rid] += 1 # Count the next match, too
else:
# Deletion / gap
assert rec['seq'][strposn] in '.-'
continue
equivalencies[i+1][rid] = curr_resn[rid]
return equivalencies | Create a mapping of equivalent residue positions to consensus.
Build a dict-of-dicts::
{consensus-posn: {id: equiv-posn, id: equiv-posn, ...}, ...}
The first sequence in the alignment is assumed to be the (gapless) consensus
sequence. | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L87-L156 | null | import logging
# from .cma import parse,
def find_seq_rec(block, name, case_sensitive=True):
"""Given part of a sequence ID, find the first matching record."""
if case_sensitive:
def test(name, rec):
return name in rec['id']
else:
def test(name, rec):
return name.upper() in rec['id'].upper()
for rec in block['sequences']:
if test(name, rec):
return rec
raise ValueError("No sequence ID matches %s" % repr(name))
def find_seq_id(block, name, case_sensitive=True):
"""Given part of a sequence ID, find the first actual ID that contains it.
Example::
>>> find_seq_id(block, '2QG5')
'gi|158430190|pdb|2QG5|A'
Raise a ValueError if no matching key is found.
"""
# logging.warn("DEPRECATED: Try to use cma.find_seq_rec instead")
rec = find_seq_rec(block, name, case_sensitive)
return rec['id']
def get_consensus(block):
"""Calculate a simple consensus sequence for the block."""
from collections import Counter
# Take aligned (non-insert) chars from all rows; transpose
columns = zip(*[[c for c in row['seq'] if not c.islower()]
for row in block['sequences']])
cons_chars = [Counter(col).most_common()[0][0] for col in columns]
cons_chars = [c if c != '-' else 'X' for c in cons_chars]
assert len(cons_chars) == block['query_length']
cons_sequence = {
'index': 1,
'id': 'consensus',
'description': '',
'dbxrefs': {},
'phylum': '',
'taxchar': '',
'head_len': None,
'tail_len': None,
'head_seq': '',
'tail_seq': '',
'length': block['query_length'],
'seq': ''.join(cons_chars),
}
return cons_sequence
def get_conservation(block):
"""Calculate conservation levels at each consensus position.
Return a dict of {position: float conservation}
"""
consensus = block['sequences'][0]['seq']
assert all(c.isupper() for c in consensus), \
"So-called consensus contains indels!"
# remove all non-consensus positions -- now alignment is easy
cleaned = [[c for c in s['seq'] if not c.islower()]
for s in block['sequences'][1:]]
height = float(len(cleaned))
# validation
for row in cleaned:
if len(row) != len(consensus):
raise ValueError("Aligned sequence length (%s) doesn't match "
"consensus (%s)"
% (len(row), len(consensus)))
# transpose & go
columns = zip(*cleaned)
return dict((idx + 1, columns[idx].count(cons_char) / height)
for idx, cons_char in enumerate(consensus))
def get_inserts(block):
"""Identify the inserts in sequence in a block.
Inserts are relative to the consensus (theoretically), and identified by
lowercase letters in the sequence. The returned integer pairs represent the
insert start and end positions in the full-length sequence, using one-based
numbering.
The first sequence of the CMA block is included, though it may just be the
consensus sequence, which shouldn't have any inserts.
Output:
{id1: [(start, end), (start, end), ...], id2: ..., ...}
"""
def find_inserts(seq, head_len):
"""Locate the lowercase regions in a character sequence.
Yield the insert ranges as tuples using 1-based numbering, shifted by
head_len.
"""
in_insert = False
curr_start = None
deletions = 0
for idx, is_lower in enumerate(map(str.islower, seq)):
if is_lower:
if not in_insert:
# Start of a new insert region
curr_start = head_len + idx + 1 - deletions
in_insert = True
else:
if in_insert:
# End of the current insert region
yield (curr_start, head_len + idx - deletions)
in_insert = False
if seq[idx] == '-':
deletions += 1
return dict((record['id'],
list(find_inserts(record['seq'], record['head_len'])))
for record in block['sequences'])
def number_letters(block_or_record, key=None):
"""Return a dict of {posn: restype} for each letter in the sequence."""
if key:
logging.warn("DEPRECATED: Pass a record instead")
assert 'sequences' in block_or_record, "Expected a block and a key"
record = find_seq_rec(block_or_record, key)
else:
assert 'id' in block_or_record, "Expected argument to be a record"
record = block_or_record
# Ungapped sequence -- accounts for dels vs. consensus
seq = record['seq'].replace('-', '').replace('.', '')
# Difference between string positions and residue numbers
shift = record['head_len'] + 1
return dict((idx + shift, letter)
for idx, letter in enumerate(seq))
|
etal/biocma | biocma/utils.py | get_inserts | python | def get_inserts(block):
def find_inserts(seq, head_len):
"""Locate the lowercase regions in a character sequence.
Yield the insert ranges as tuples using 1-based numbering, shifted by
head_len.
"""
in_insert = False
curr_start = None
deletions = 0
for idx, is_lower in enumerate(map(str.islower, seq)):
if is_lower:
if not in_insert:
# Start of a new insert region
curr_start = head_len + idx + 1 - deletions
in_insert = True
else:
if in_insert:
# End of the current insert region
yield (curr_start, head_len + idx - deletions)
in_insert = False
if seq[idx] == '-':
deletions += 1
return dict((record['id'],
list(find_inserts(record['seq'], record['head_len'])))
for record in block['sequences']) | Identify the inserts in sequence in a block.
Inserts are relative to the consensus (theoretically), and identified by
lowercase letters in the sequence. The returned integer pairs represent the
insert start and end positions in the full-length sequence, using one-based
numbering.
The first sequence of the CMA block is included, though it may just be the
consensus sequence, which shouldn't have any inserts.
Output:
{id1: [(start, end), (start, end), ...], id2: ..., ...} | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L159-L200 | null | import logging
# from .cma import parse,
def find_seq_rec(block, name, case_sensitive=True):
"""Given part of a sequence ID, find the first matching record."""
if case_sensitive:
def test(name, rec):
return name in rec['id']
else:
def test(name, rec):
return name.upper() in rec['id'].upper()
for rec in block['sequences']:
if test(name, rec):
return rec
raise ValueError("No sequence ID matches %s" % repr(name))
def find_seq_id(block, name, case_sensitive=True):
"""Given part of a sequence ID, find the first actual ID that contains it.
Example::
>>> find_seq_id(block, '2QG5')
'gi|158430190|pdb|2QG5|A'
Raise a ValueError if no matching key is found.
"""
# logging.warn("DEPRECATED: Try to use cma.find_seq_rec instead")
rec = find_seq_rec(block, name, case_sensitive)
return rec['id']
def get_consensus(block):
"""Calculate a simple consensus sequence for the block."""
from collections import Counter
# Take aligned (non-insert) chars from all rows; transpose
columns = zip(*[[c for c in row['seq'] if not c.islower()]
for row in block['sequences']])
cons_chars = [Counter(col).most_common()[0][0] for col in columns]
cons_chars = [c if c != '-' else 'X' for c in cons_chars]
assert len(cons_chars) == block['query_length']
cons_sequence = {
'index': 1,
'id': 'consensus',
'description': '',
'dbxrefs': {},
'phylum': '',
'taxchar': '',
'head_len': None,
'tail_len': None,
'head_seq': '',
'tail_seq': '',
'length': block['query_length'],
'seq': ''.join(cons_chars),
}
return cons_sequence
def get_conservation(block):
"""Calculate conservation levels at each consensus position.
Return a dict of {position: float conservation}
"""
consensus = block['sequences'][0]['seq']
assert all(c.isupper() for c in consensus), \
"So-called consensus contains indels!"
# remove all non-consensus positions -- now alignment is easy
cleaned = [[c for c in s['seq'] if not c.islower()]
for s in block['sequences'][1:]]
height = float(len(cleaned))
# validation
for row in cleaned:
if len(row) != len(consensus):
raise ValueError("Aligned sequence length (%s) doesn't match "
"consensus (%s)"
% (len(row), len(consensus)))
# transpose & go
columns = zip(*cleaned)
return dict((idx + 1, columns[idx].count(cons_char) / height)
for idx, cons_char in enumerate(consensus))
def get_equivalent_positions(block):
"""Create a mapping of equivalent residue positions to consensus.
Build a dict-of-dicts::
{consensus-posn: {id: equiv-posn, id: equiv-posn, ...}, ...}
The first sequence in the alignment is assumed to be the (gapless) consensus
sequence.
"""
consensus = block['sequences'][0]['seq']
rest = block['sequences'][1:]
# Validation
if '-' in consensus or '.' in consensus:
raise ValueError("First sequence (consensus?) contains gaps")
# Check for duplicate sequence IDs
seen = set()
dupes = set()
for rec in rest:
if rec['id'] in seen:
dupes.add(rec['id'])
else:
seen.add(rec['id'])
if dupes:
raise ValueError("Duplicate sequences:\n" + '\n'.join(dupes))
curr_shift = {}
curr_resn = {}
# NB: consensus doesn't have head/tail, but other sequences may
for rec in rest:
# Count inserts seen so far -- shift string indexes by this far ahead to
# get the "equivalent" location in the sequence string
# - as in, how far ahead in the current seq do we need to jump to get
# to a position equivalent to what's in the consensus?
# - can this ever be less than 0 (==consensus)? No, because that's
# where gaps come from. Good.
curr_shift[rec['id']] = 0
# Residue number in the actual sequence at the current (shifted)
# location
# curr_posn[id] = current equivalent res.num in `id` to cons[i]
curr_resn[rec['id']] = rec['head_len']
equivalencies = dict((i+1, {}) for i in xrange(len(consensus)))
# Map each character position i in the consensus sequence
# to equivalent residues in each of the other sequences
# i = index in the consensus string (== consensus res.num - 1)
for i, char in enumerate(consensus):
assert char.isupper()
for rec in rest:
rid = rec['id']
strposn = i + curr_shift[rid]
if rec['seq'][strposn].isupper():
# Match
curr_resn[rid] += 1
elif rec['seq'][strposn].islower():
# Insert
while rec['seq'][strposn].islower():
# Count the whole insert size
curr_shift[rid] += 1
curr_resn[rid] += 1
strposn += 1
curr_resn[rid] += 1 # Count the next match, too
else:
# Deletion / gap
assert rec['seq'][strposn] in '.-'
continue
equivalencies[i+1][rid] = curr_resn[rid]
return equivalencies
def number_letters(block_or_record, key=None):
"""Return a dict of {posn: restype} for each letter in the sequence."""
if key:
logging.warn("DEPRECATED: Pass a record instead")
assert 'sequences' in block_or_record, "Expected a block and a key"
record = find_seq_rec(block_or_record, key)
else:
assert 'id' in block_or_record, "Expected argument to be a record"
record = block_or_record
# Ungapped sequence -- accounts for dels vs. consensus
seq = record['seq'].replace('-', '').replace('.', '')
# Difference between string positions and residue numbers
shift = record['head_len'] + 1
return dict((idx + shift, letter)
for idx, letter in enumerate(seq))
|
etal/biocma | biocma/utils.py | number_letters | python | def number_letters(block_or_record, key=None):
if key:
logging.warn("DEPRECATED: Pass a record instead")
assert 'sequences' in block_or_record, "Expected a block and a key"
record = find_seq_rec(block_or_record, key)
else:
assert 'id' in block_or_record, "Expected argument to be a record"
record = block_or_record
# Ungapped sequence -- accounts for dels vs. consensus
seq = record['seq'].replace('-', '').replace('.', '')
# Difference between string positions and residue numbers
shift = record['head_len'] + 1
return dict((idx + shift, letter)
for idx, letter in enumerate(seq)) | Return a dict of {posn: restype} for each letter in the sequence. | train | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L203-L217 | [
"def find_seq_rec(block, name, case_sensitive=True):\n \"\"\"Given part of a sequence ID, find the first matching record.\"\"\"\n if case_sensitive:\n def test(name, rec):\n return name in rec['id']\n else:\n def test(name, rec):\n return name.upper() in rec['id'].upper()\n\n for rec in block['sequences']:\n if test(name, rec):\n return rec\n raise ValueError(\"No sequence ID matches %s\" % repr(name))\n"
] | import logging
# from .cma import parse,
def find_seq_rec(block, name, case_sensitive=True):
"""Given part of a sequence ID, find the first matching record."""
if case_sensitive:
def test(name, rec):
return name in rec['id']
else:
def test(name, rec):
return name.upper() in rec['id'].upper()
for rec in block['sequences']:
if test(name, rec):
return rec
raise ValueError("No sequence ID matches %s" % repr(name))
def find_seq_id(block, name, case_sensitive=True):
"""Given part of a sequence ID, find the first actual ID that contains it.
Example::
>>> find_seq_id(block, '2QG5')
'gi|158430190|pdb|2QG5|A'
Raise a ValueError if no matching key is found.
"""
# logging.warn("DEPRECATED: Try to use cma.find_seq_rec instead")
rec = find_seq_rec(block, name, case_sensitive)
return rec['id']
def get_consensus(block):
"""Calculate a simple consensus sequence for the block."""
from collections import Counter
# Take aligned (non-insert) chars from all rows; transpose
columns = zip(*[[c for c in row['seq'] if not c.islower()]
for row in block['sequences']])
cons_chars = [Counter(col).most_common()[0][0] for col in columns]
cons_chars = [c if c != '-' else 'X' for c in cons_chars]
assert len(cons_chars) == block['query_length']
cons_sequence = {
'index': 1,
'id': 'consensus',
'description': '',
'dbxrefs': {},
'phylum': '',
'taxchar': '',
'head_len': None,
'tail_len': None,
'head_seq': '',
'tail_seq': '',
'length': block['query_length'],
'seq': ''.join(cons_chars),
}
return cons_sequence
def get_conservation(block):
"""Calculate conservation levels at each consensus position.
Return a dict of {position: float conservation}
"""
consensus = block['sequences'][0]['seq']
assert all(c.isupper() for c in consensus), \
"So-called consensus contains indels!"
# remove all non-consensus positions -- now alignment is easy
cleaned = [[c for c in s['seq'] if not c.islower()]
for s in block['sequences'][1:]]
height = float(len(cleaned))
# validation
for row in cleaned:
if len(row) != len(consensus):
raise ValueError("Aligned sequence length (%s) doesn't match "
"consensus (%s)"
% (len(row), len(consensus)))
# transpose & go
columns = zip(*cleaned)
return dict((idx + 1, columns[idx].count(cons_char) / height)
for idx, cons_char in enumerate(consensus))
def get_equivalent_positions(block):
"""Create a mapping of equivalent residue positions to consensus.
Build a dict-of-dicts::
{consensus-posn: {id: equiv-posn, id: equiv-posn, ...}, ...}
The first sequence in the alignment is assumed to be the (gapless) consensus
sequence.
"""
consensus = block['sequences'][0]['seq']
rest = block['sequences'][1:]
# Validation
if '-' in consensus or '.' in consensus:
raise ValueError("First sequence (consensus?) contains gaps")
# Check for duplicate sequence IDs
seen = set()
dupes = set()
for rec in rest:
if rec['id'] in seen:
dupes.add(rec['id'])
else:
seen.add(rec['id'])
if dupes:
raise ValueError("Duplicate sequences:\n" + '\n'.join(dupes))
curr_shift = {}
curr_resn = {}
# NB: consensus doesn't have head/tail, but other sequences may
for rec in rest:
# Count inserts seen so far -- shift string indexes by this far ahead to
# get the "equivalent" location in the sequence string
# - as in, how far ahead in the current seq do we need to jump to get
# to a position equivalent to what's in the consensus?
# - can this ever be less than 0 (==consensus)? No, because that's
# where gaps come from. Good.
curr_shift[rec['id']] = 0
# Residue number in the actual sequence at the current (shifted)
# location
# curr_posn[id] = current equivalent res.num in `id` to cons[i]
curr_resn[rec['id']] = rec['head_len']
equivalencies = dict((i+1, {}) for i in xrange(len(consensus)))
# Map each character position i in the consensus sequence
# to equivalent residues in each of the other sequences
# i = index in the consensus string (== consensus res.num - 1)
for i, char in enumerate(consensus):
assert char.isupper()
for rec in rest:
rid = rec['id']
strposn = i + curr_shift[rid]
if rec['seq'][strposn].isupper():
# Match
curr_resn[rid] += 1
elif rec['seq'][strposn].islower():
# Insert
while rec['seq'][strposn].islower():
# Count the whole insert size
curr_shift[rid] += 1
curr_resn[rid] += 1
strposn += 1
curr_resn[rid] += 1 # Count the next match, too
else:
# Deletion / gap
assert rec['seq'][strposn] in '.-'
continue
equivalencies[i+1][rid] = curr_resn[rid]
return equivalencies
def get_inserts(block):
"""Identify the inserts in sequence in a block.
Inserts are relative to the consensus (theoretically), and identified by
lowercase letters in the sequence. The returned integer pairs represent the
insert start and end positions in the full-length sequence, using one-based
numbering.
The first sequence of the CMA block is included, though it may just be the
consensus sequence, which shouldn't have any inserts.
Output:
{id1: [(start, end), (start, end), ...], id2: ..., ...}
"""
def find_inserts(seq, head_len):
"""Locate the lowercase regions in a character sequence.
Yield the insert ranges as tuples using 1-based numbering, shifted by
head_len.
"""
in_insert = False
curr_start = None
deletions = 0
for idx, is_lower in enumerate(map(str.islower, seq)):
if is_lower:
if not in_insert:
# Start of a new insert region
curr_start = head_len + idx + 1 - deletions
in_insert = True
else:
if in_insert:
# End of the current insert region
yield (curr_start, head_len + idx - deletions)
in_insert = False
if seq[idx] == '-':
deletions += 1
return dict((record['id'],
list(find_inserts(record['seq'], record['head_len'])))
for record in block['sequences'])
|
Naresh1318/crystal | crystal/sql_table_utils.py | adapt_array | python | def adapt_array(arr):
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return sqlite3.Binary(out.read()) | http://stackoverflow.com/a/31312102/190597 (SoulNibbler) | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/sql_table_utils.py#L32-L39 | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import csv
import sqlite3
import logging
import numpy as np
class DatabaseDir:
def __init__(self):
self.database_name = "crystal.db"
self.home_dir = os.path.expanduser("~")
self.main_data_dir = os.path.join(self.home_dir, "Crystal_data")
self.database_dir = os.path.join(self.main_data_dir, self.database_name)
def get_database_dir(self):
return self.database_dir
def set_database_dir(self, new_database_dir):
self.database_dir = new_database_dir
logging.info("Database dir set to: {}".format(self.database_dir))
# Get main dataset directory
dd = DatabaseDir()
def convert_array(text):
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
# TODO: Close database connections in all functions
def open_data_base_connection(skip_dir_check=False):
"""
Creates a connections to the crystal database.
:param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.
This is used by Crystal.py to create a new database the first time.
False -> Raises a error if the database file is not found.
:return: conn, c -> connection and cursor object
"""
if not skip_dir_check:
assert os.path.isfile(dd.get_database_dir()), \
"Database file not found in {}. " \
"Please ensure that you have written data atleast once.".format(dd.get_database_dir())
# Converts np.array to TEXT when inserting
sqlite3.register_adapter(np.ndarray, adapt_array)
# Converts TEXT to np.array when selecting
sqlite3.register_converter("array", convert_array)
conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)
c = conn.cursor()
return conn, c
def drop_run(project_name, run_name):
"""
Deletes a run from a desired project. If this causes the run_table to be empty then the entire project gets deleted
:param project_name: String, project which contains the desire run_name
:param run_name: String, run to delete
"""
conn, c = open_data_base_connection()
# delete all the variable tables first
c.execute("SELECT variable_name FROM {}".format(run_name))
try:
all_variables = np.array(c.fetchall()).squeeze(axis=1)
for i in all_variables:
variable_table_name = run_name + '_' + i
c.execute("""DROP TABLE IF EXISTS {}""".format(variable_table_name))
except np.core._internal.AxisError:
print("Did not find any values, so deleting run table directly.")
c.execute("""DROP TABLE IF EXISTS {}""".format(run_name))
c.execute("""DELETE FROM {} WHERE run_name='{}'""".format(project_name + '_run_table', run_name))
# delete project if project_name+'_run_table' is empty
c.execute("""SELECT run_name FROM {}""".format(project_name + '_run_table'))
all_runs = c.fetchall()
if len(all_runs) == 0:
c.execute("""DROP TABLE IF EXISTS {}""".format(project_name + '_run_table'))
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} table deleted".format(run_name))
def drop_project(project_name):
"""
Deletes all the tables associated with a project and removes it from the main_table
:param project_name: String, project to delete
"""
conn, c = open_data_base_connection()
# Need to delete all the run_tables before removing the project_table and the entry from the main_table
run_table_name = project_name + '_run_table'
c.execute("SELECT run_name FROM {}".format(run_table_name))
run_names = np.array(c.fetchall()).squeeze(axis=1)
# remove one run at a time
for run in run_names:
drop_run(project_name, run)
c.execute("DROP TABLE IF EXISTS {}".format(run_table_name))
# Remove the project row from main table
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} project deleted".format(project_name))
def get_latest_run():
"""
Returns the run latest run from the database.
:return: dict -> {<int_keys>: <run_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall())
latest_run_name = convert_list_to_dict(np.squeeze(run_names, 1)[-1])
conn.close()
return latest_run_name
def get_latest_project_and_runs():
"""
Returns both the latest project and runs from the database.
:return: dict -> {"latest_project": <project_name>, "latest_runs": [<run_name>]}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
conn.close()
return {"latest_project": latest_project_name, "latest_runs": run_names}
def get_figure_stats(run_table_name):
"""
Returns the latest variable names
:param run_table_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_table_name))
variable_names = convert_list_to_dict(np.array(c.fetchall()).squeeze(axis=1))
conn.close()
return variable_names
def get_latest_stats():
"""
Returns the latest run and variable names.
:return: dict -> {"latest_run": <run_name>, "variable_name": {<int_keys>: <variable_name>}}
"""
latest_run_name = get_latest_run()
variable_names = get_figure_stats(latest_run_name)
latest_stats = {'latest_run': latest_run_name, 'variable_names': variable_names}
return latest_stats
def get_projects():
"""
Returns a dict of projects present in the database.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall()).squeeze(axis=1)
project_names = convert_list_to_dict(project_names)
return project_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_runs(project_name):
"""
Returns a dict of runs present in project.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT run_name FROM {}""".format(project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
run_names = convert_list_to_dict(run_names)
return run_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variables(run_name):
"""
Returns a dict of variables in the selected run table.
:param run_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
try:
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_name))
variable_names = np.array(c.fetchall()).squeeze(axis=1)
variable_names = convert_list_to_dict(variable_names)
return variable_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variable_update_dicts(current_index, variable_names, selected_run):
"""
Query appropriate tables and return data to dashboard in the required format.
:param current_index: int, current index during update
:param variable_names: str, variable name to fetch values from
:param selected_run: str, run containing the variable
:return: dict, {<variable_name>: [<values>]}
"""
conn, c = open_data_base_connection()
data = {}
for _, v_n in variable_names:
data[v_n] = {'x': [], 'y': [], 'z': [], 'vn': []}
try:
# values for each variable
for _, v_n in variable_names:
plot_type = v_n.split("_")[0]
if plot_type == "scalar":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["y"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
elif plot_type == "heatmap":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT V_names FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
v_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
data[v_n]["vn"] = v_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except sqlite3.OperationalError:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
except KeyError:
logging.error("I think the run variable has changes. So, I'm passing no data.")
conn.close()
return data
def generate_graph_csv(variable_table_name):
"""
Generates a temporary CSV file that contains the data for the selected variable table name.
:param variable_table_name: str, variable table name
:return: str, temp CSV file path
"""
temp_csv = home_dir + "/PycharmProjects/crystal/crystal/static/temp.csv"
conn, c = open_data_base_connection()
# Get variable data
c.execute("""SELECT * FROM {}""".format(variable_table_name))
with open(temp_csv, "w", newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow([i[0] for i in c.description]) # write headers
csv_writer.writerows(c)
print("File saved: {}".format(temp_csv))
conn.close()
return temp_csv
def convert_list_to_dict(input_list):
"""
Convert a list of values into a dict with int as keys
:param input_list: list, list to convert
:return: dict -> {<int_keys>: <list_elements>}
"""
return {k: v for k, v in enumerate(input_list)}
|
Naresh1318/crystal | crystal/sql_table_utils.py | open_data_base_connection | python | def open_data_base_connection(skip_dir_check=False):
if not skip_dir_check:
assert os.path.isfile(dd.get_database_dir()), \
"Database file not found in {}. " \
"Please ensure that you have written data atleast once.".format(dd.get_database_dir())
# Converts np.array to TEXT when inserting
sqlite3.register_adapter(np.ndarray, adapt_array)
# Converts TEXT to np.array when selecting
sqlite3.register_converter("array", convert_array)
conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)
c = conn.cursor()
return conn, c | Creates a connections to the crystal database.
:param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.
This is used by Crystal.py to create a new database the first time.
False -> Raises a error if the database file is not found.
:return: conn, c -> connection and cursor object | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/sql_table_utils.py#L51-L72 | [
"def get_database_dir(self):\n return self.database_dir\n"
] | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import csv
import sqlite3
import logging
import numpy as np
class DatabaseDir:
def __init__(self):
self.database_name = "crystal.db"
self.home_dir = os.path.expanduser("~")
self.main_data_dir = os.path.join(self.home_dir, "Crystal_data")
self.database_dir = os.path.join(self.main_data_dir, self.database_name)
def get_database_dir(self):
return self.database_dir
def set_database_dir(self, new_database_dir):
self.database_dir = new_database_dir
logging.info("Database dir set to: {}".format(self.database_dir))
# Get main dataset directory
dd = DatabaseDir()
def adapt_array(arr):
"""
http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
"""
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return sqlite3.Binary(out.read())
def convert_array(text):
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
# TODO: Close database connections in all functions
def drop_run(project_name, run_name):
"""
Deletes a run from a desired project. If this causes the run_table to be empty then the entire project gets deleted
:param project_name: String, project which contains the desire run_name
:param run_name: String, run to delete
"""
conn, c = open_data_base_connection()
# delete all the variable tables first
c.execute("SELECT variable_name FROM {}".format(run_name))
try:
all_variables = np.array(c.fetchall()).squeeze(axis=1)
for i in all_variables:
variable_table_name = run_name + '_' + i
c.execute("""DROP TABLE IF EXISTS {}""".format(variable_table_name))
except np.core._internal.AxisError:
print("Did not find any values, so deleting run table directly.")
c.execute("""DROP TABLE IF EXISTS {}""".format(run_name))
c.execute("""DELETE FROM {} WHERE run_name='{}'""".format(project_name + '_run_table', run_name))
# delete project if project_name+'_run_table' is empty
c.execute("""SELECT run_name FROM {}""".format(project_name + '_run_table'))
all_runs = c.fetchall()
if len(all_runs) == 0:
c.execute("""DROP TABLE IF EXISTS {}""".format(project_name + '_run_table'))
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} table deleted".format(run_name))
def drop_project(project_name):
"""
Deletes all the tables associated with a project and removes it from the main_table
:param project_name: String, project to delete
"""
conn, c = open_data_base_connection()
# Need to delete all the run_tables before removing the project_table and the entry from the main_table
run_table_name = project_name + '_run_table'
c.execute("SELECT run_name FROM {}".format(run_table_name))
run_names = np.array(c.fetchall()).squeeze(axis=1)
# remove one run at a time
for run in run_names:
drop_run(project_name, run)
c.execute("DROP TABLE IF EXISTS {}".format(run_table_name))
# Remove the project row from main table
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} project deleted".format(project_name))
def get_latest_run():
"""
Returns the run latest run from the database.
:return: dict -> {<int_keys>: <run_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall())
latest_run_name = convert_list_to_dict(np.squeeze(run_names, 1)[-1])
conn.close()
return latest_run_name
def get_latest_project_and_runs():
"""
Returns both the latest project and runs from the database.
:return: dict -> {"latest_project": <project_name>, "latest_runs": [<run_name>]}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
conn.close()
return {"latest_project": latest_project_name, "latest_runs": run_names}
def get_figure_stats(run_table_name):
"""
Returns the latest variable names
:param run_table_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_table_name))
variable_names = convert_list_to_dict(np.array(c.fetchall()).squeeze(axis=1))
conn.close()
return variable_names
def get_latest_stats():
"""
Returns the latest run and variable names.
:return: dict -> {"latest_run": <run_name>, "variable_name": {<int_keys>: <variable_name>}}
"""
latest_run_name = get_latest_run()
variable_names = get_figure_stats(latest_run_name)
latest_stats = {'latest_run': latest_run_name, 'variable_names': variable_names}
return latest_stats
def get_projects():
"""
Returns a dict of projects present in the database.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall()).squeeze(axis=1)
project_names = convert_list_to_dict(project_names)
return project_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_runs(project_name):
"""
Returns a dict of runs present in project.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT run_name FROM {}""".format(project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
run_names = convert_list_to_dict(run_names)
return run_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variables(run_name):
"""
Returns a dict of variables in the selected run table.
:param run_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
try:
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_name))
variable_names = np.array(c.fetchall()).squeeze(axis=1)
variable_names = convert_list_to_dict(variable_names)
return variable_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variable_update_dicts(current_index, variable_names, selected_run):
"""
Query appropriate tables and return data to dashboard in the required format.
:param current_index: int, current index during update
:param variable_names: str, variable name to fetch values from
:param selected_run: str, run containing the variable
:return: dict, {<variable_name>: [<values>]}
"""
conn, c = open_data_base_connection()
data = {}
for _, v_n in variable_names:
data[v_n] = {'x': [], 'y': [], 'z': [], 'vn': []}
try:
# values for each variable
for _, v_n in variable_names:
plot_type = v_n.split("_")[0]
if plot_type == "scalar":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["y"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
elif plot_type == "heatmap":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT V_names FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
v_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
data[v_n]["vn"] = v_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except sqlite3.OperationalError:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
except KeyError:
logging.error("I think the run variable has changes. So, I'm passing no data.")
conn.close()
return data
def generate_graph_csv(variable_table_name):
"""
Generates a temporary CSV file that contains the data for the selected variable table name.
:param variable_table_name: str, variable table name
:return: str, temp CSV file path
"""
temp_csv = home_dir + "/PycharmProjects/crystal/crystal/static/temp.csv"
conn, c = open_data_base_connection()
# Get variable data
c.execute("""SELECT * FROM {}""".format(variable_table_name))
with open(temp_csv, "w", newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow([i[0] for i in c.description]) # write headers
csv_writer.writerows(c)
print("File saved: {}".format(temp_csv))
conn.close()
return temp_csv
def convert_list_to_dict(input_list):
"""
Convert a list of values into a dict with int as keys
:param input_list: list, list to convert
:return: dict -> {<int_keys>: <list_elements>}
"""
return {k: v for k, v in enumerate(input_list)}
|
Naresh1318/crystal | crystal/sql_table_utils.py | drop_run | python | def drop_run(project_name, run_name):
conn, c = open_data_base_connection()
# delete all the variable tables first
c.execute("SELECT variable_name FROM {}".format(run_name))
try:
all_variables = np.array(c.fetchall()).squeeze(axis=1)
for i in all_variables:
variable_table_name = run_name + '_' + i
c.execute("""DROP TABLE IF EXISTS {}""".format(variable_table_name))
except np.core._internal.AxisError:
print("Did not find any values, so deleting run table directly.")
c.execute("""DROP TABLE IF EXISTS {}""".format(run_name))
c.execute("""DELETE FROM {} WHERE run_name='{}'""".format(project_name + '_run_table', run_name))
# delete project if project_name+'_run_table' is empty
c.execute("""SELECT run_name FROM {}""".format(project_name + '_run_table'))
all_runs = c.fetchall()
if len(all_runs) == 0:
c.execute("""DROP TABLE IF EXISTS {}""".format(project_name + '_run_table'))
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} table deleted".format(run_name)) | Deletes a run from a desired project. If this causes the run_table to be empty then the entire project gets deleted
:param project_name: String, project which contains the desired run_name
:param run_name: String, run to delete | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/sql_table_utils.py#L75-L103 | [
"def open_data_base_connection(skip_dir_check=False):\n \"\"\"\n Creates a connections to the crystal database.\n :param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.\n This is used by Crystal.py to create a new database the first time.\n False -> Raises a error if the database file is not found.\n :return: conn, c -> connection and cursor object\n \"\"\"\n if not skip_dir_check:\n assert os.path.isfile(dd.get_database_dir()), \\\n \"Database file not found in {}. \" \\\n \"Please ensure that you have written data atleast once.\".format(dd.get_database_dir())\n\n # Converts np.array to TEXT when inserting\n sqlite3.register_adapter(np.ndarray, adapt_array)\n\n # Converts TEXT to np.array when selecting\n sqlite3.register_converter(\"array\", convert_array)\n\n conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n return conn, c\n"
] | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import csv
import sqlite3
import logging
import numpy as np
class DatabaseDir:
    """Mutable holder for the location of the crystal SQLite database file.

    Defaults to ``~/Crystal_data/crystal.db``; the setter lets callers point
    this module at a different database file at runtime.
    """

    def __init__(self):
        self.database_name = "crystal.db"
        self.home_dir = os.path.expanduser("~")
        self.main_data_dir = os.path.join(self.home_dir, "Crystal_data")
        self.database_dir = os.path.join(self.main_data_dir, self.database_name)

    def get_database_dir(self):
        """Return the current database file path."""
        return self.database_dir

    def set_database_dir(self, new_database_dir):
        """Replace the database file path and log the change.

        :param new_database_dir: str, new path to the SQLite database file
        """
        self.database_dir = new_database_dir
        logging.info("Database dir set to: {}".format(self.database_dir))
# Get main dataset directory
dd = DatabaseDir()
def adapt_array(arr):
    """sqlite3 adapter: serialize a numpy array into a BLOB via ``np.save``.

    http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
    """
    buffer = io.BytesIO()
    np.save(buffer, arr)
    # getvalue() yields the full serialized payload without needing a seek.
    return sqlite3.Binary(buffer.getvalue())
def convert_array(text):
    """sqlite3 converter: deserialize a ``np.save`` BLOB back into an array."""
    stream = io.BytesIO(text)
    stream.seek(0)
    return np.load(stream)
# TODO: Close database connections in all functions
def open_data_base_connection(skip_dir_check=False):
    """Open a connection to the crystal database.

    :param skip_dir_check: bool, True -> skip the database-file existence
                           check (used by Crystal.py to create a fresh
                           database the first time).
                           False -> assert that the database file exists.
    :return: (conn, c) -> sqlite3 connection and cursor objects
    """
    db_path = dd.get_database_dir()
    if not skip_dir_check:
        assert os.path.isfile(db_path), \
            "Database file not found in {}. " \
            "Please ensure that you have written data atleast once.".format(db_path)

    # Teach sqlite3 to store numpy arrays as BLOBs (insert side) ...
    sqlite3.register_adapter(np.ndarray, adapt_array)
    # ... and to turn "array"-typed columns back into numpy arrays (select side).
    sqlite3.register_converter("array", convert_array)

    connection = sqlite3.connect(db_path, detect_types=sqlite3.PARSE_DECLTYPES)
    return connection, connection.cursor()
def drop_project(project_name):
    """Delete every table belonging to a project and its main_table entry.

    :param project_name: String, project to delete
    """
    conn, c = open_data_base_connection()
    run_table = project_name + '_run_table'
    # Each run must go first, since drop_run also removes the per-variable
    # tables hanging off it.
    c.execute("SELECT run_name FROM {}".format(run_table))
    for run in np.array(c.fetchall()).squeeze(axis=1):
        drop_run(project_name, run)
    c.execute("DROP TABLE IF EXISTS {}".format(run_table))
    # Finally forget the project itself.
    c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
    conn.commit()
    print("{} project deleted".format(project_name))
def get_latest_run():
    """
    Return data derived from the newest run of the newest project.

    :return: dict built by convert_list_to_dict from the last run name
    """
    conn, c = open_data_base_connection()
    # Get latest project: rows come back in insertion order, so the last
    # fetched row is the most recently created project.
    c.execute("""SELECT project_name FROM main_table""")
    project_names = np.array(c.fetchall())
    latest_project_name = project_names[-1][-1]
    # Get latest run
    c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
    run_names = np.array(c.fetchall())
    # NOTE(review): `[-1]` selects a single run-name *string*, so
    # convert_list_to_dict enumerates its characters ({0: 'c', 1: 'r', ...}).
    # If a {index: run_name} mapping was intended (as the sibling
    # get_latest_project_and_runs suggests), this looks suspect — confirm.
    latest_run_name = convert_list_to_dict(np.squeeze(run_names, 1)[-1])
    conn.close()
    return latest_run_name
def get_latest_project_and_runs():
    """Return the newest project together with all of its run names.

    :return: dict -> {"latest_project": <project_name>, "latest_runs": [<run_name>]}
    """
    conn, c = open_data_base_connection()
    # The last row of main_table is the most recently created project.
    c.execute("""SELECT project_name FROM main_table""")
    latest_project_name = np.array(c.fetchall())[-1][-1]
    # Collect every run recorded under that project.
    c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
    run_names = np.array(c.fetchall()).squeeze(axis=1)
    conn.close()
    return {"latest_project": latest_project_name, "latest_runs": run_names}
def get_figure_stats(run_table_name):
    """Return the variable names recorded in a run table.

    :param run_table_name: str, required run table name
    :return: dict -> {<int_keys>: <variable_name>}
    """
    conn, c = open_data_base_connection()
    c.execute("""SELECT variable_name FROM {}""".format(run_table_name))
    names = np.array(c.fetchall()).squeeze(axis=1)
    conn.close()
    return convert_list_to_dict(names)
def get_latest_stats():
    """Return the latest run name together with its variable names.

    :return: dict -> {"latest_run": <run_name>, "variable_names": {<int_keys>: <variable_name>}}
    """
    run_name = get_latest_run()
    return {
        'latest_run': run_name,
        'variable_names': get_figure_stats(run_name),
    }
def get_projects():
    """Return the projects present in the database.

    :return: dict -> {<int_keys>: <project_name>}, or None when main_table
             does not exist yet
    """
    conn, c = open_data_base_connection()
    try:
        c.execute("""SELECT project_name FROM main_table""")
        project_names = np.array(c.fetchall()).squeeze(axis=1)
        return convert_list_to_dict(project_names)
    except sqlite3.OperationalError:
        # BUG FIX: the original logged the undefined name `run_name` here,
        # raising a NameError instead of reporting the missing table.
        logging.info("main_table not found")
    finally:
        conn.close()
def get_runs(project_name):
    """Return the runs recorded under a project.

    :param project_name: str, project whose run table is queried
    :return: dict -> {<int_keys>: <run_name>}, or None when the run table
             does not exist
    """
    conn, c = open_data_base_connection()
    try:
        c.execute("""SELECT run_name FROM {}""".format(project_name + "_run_table"))
        run_names = np.array(c.fetchall()).squeeze(axis=1)
        return convert_list_to_dict(run_names)
    except sqlite3.OperationalError:
        # BUG FIX: the original logged the undefined name `run_name`
        # (NameError); report the table that was actually missing instead.
        logging.info("{} not found".format(project_name + "_run_table"))
    finally:
        conn.close()
def get_variables(run_name):
    """Return the variables recorded in the selected run table.

    :param run_name: str, required run table name
    :return: dict -> {<int_keys>: <variable_name>}, or None when the table
             does not exist
    """
    conn, c = open_data_base_connection()
    try:
        c.execute("""SELECT variable_name FROM {}""".format(run_name))
        names = np.array(c.fetchall()).squeeze(axis=1)
        return convert_list_to_dict(names)
    except sqlite3.OperationalError:
        logging.info("{} not found".format(run_name))
    finally:
        conn.close()
def get_variable_update_dicts(current_index, variable_names, selected_run):
    """
    Query the variable tables and return new data points for the dashboard.

    :param current_index: dict, {<variable_name>: <last rowid already sent>};
                          mutated in place as new rows are consumed.
    :param variable_names: iterable of 2-tuples (<key>, <variable_name>) —
                           presumably dict.items() from get_variables();
                           TODO confirm against callers.
    :param selected_run: str, run whose variable tables are queried
    :return: dict, {<variable_name>: {'x': [...], 'y': [...], 'z': [...], 'vn': [...]}}
    """
    conn, c = open_data_base_connection()
    data = {}
    for _, v_n in variable_names:
        data[v_n] = {'x': [], 'y': [], 'z': [], 'vn': []}
    try:
        # values for each variable
        for _, v_n in variable_names:
            # Variable names start with the plot type ("scalar_..." /
            # "heatmap_..."); the prefix selects how rows are interpreted.
            plot_type = v_n.split("_")[0]
            if plot_type == "scalar":
                try:
                    # Only fetch rows newer than the last rowid already sent.
                    c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    x_values = np.array(c.fetchall()).squeeze().tolist()
                    c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    y_values = np.array(c.fetchall()).squeeze().tolist()
                    data[v_n]["x"] = x_values
                    data[v_n]["y"] = y_values
                    # Advance the bookmark so these rows are not resent.
                    n_values = len(x_values)
                    current_index["{}".format(v_n)] += n_values
                    logging.info("New value found and updated")
                except IndexError:
                    logging.info("No new data point found")
            elif plot_type == "heatmap":
                try:
                    c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    x_values = np.array(c.fetchall()).squeeze().tolist()
                    c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    y_values = np.array(c.fetchall()).squeeze().tolist()
                    c.execute("""SELECT V_names FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    v_values = np.array(c.fetchall()).squeeze().tolist()
                    data[v_n]["x"] = x_values
                    data[v_n]["z"] = y_values
                    data[v_n]["vn"] = v_values
                    n_values = len(x_values)
                    current_index["{}".format(v_n)] += n_values
                    logging.info("New value found and updated")
                except sqlite3.OperationalError:
                    # Fallback for heatmap tables without a V_names column:
                    # re-run the query with X/Y only and leave "vn" empty.
                    c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    x_values = np.array(c.fetchall()).squeeze().tolist()
                    c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    y_values = np.array(c.fetchall()).squeeze().tolist()
                    data[v_n]["x"] = x_values
                    data[v_n]["z"] = y_values
                    n_values = len(x_values)
                    current_index["{}".format(v_n)] += n_values
                    logging.info("New value found and updated")
                except IndexError:
                    logging.info("No new data point found")
    except KeyError:
        logging.error("I think the run variable has changes. So, I'm passing no data.")
    conn.close()
    return data
def generate_graph_csv(variable_table_name):
    """
    Generate a temporary CSV file containing the data of a variable table.

    :param variable_table_name: str, variable table name
    :return: str, temp CSV file path
    """
    # BUG FIX: the original referenced a module-level `home_dir` that is never
    # defined (it only exists as an attribute of DatabaseDir instances), so
    # every call raised NameError.  Resolve the home directory locally instead.
    home_dir = os.path.expanduser("~")
    temp_csv = home_dir + "/PycharmProjects/crystal/crystal/static/temp.csv"
    conn, c = open_data_base_connection()
    # Get variable data
    c.execute("""SELECT * FROM {}""".format(variable_table_name))
    with open(temp_csv, "w", newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow([i[0] for i in c.description])  # write headers
        csv_writer.writerows(c)
    print("File saved: {}".format(temp_csv))
    conn.close()
    return temp_csv
def convert_list_to_dict(input_list):
    """Map consecutive integer keys onto the elements of *input_list*.

    :param input_list: list, list to convert
    :return: dict -> {<int_keys>: <list_elements>}
    """
    return dict(enumerate(input_list))
|
Naresh1318/crystal | crystal/sql_table_utils.py | drop_project | python | def drop_project(project_name):
conn, c = open_data_base_connection()
# Need to delete all the run_tables before removing the project_table and the entry from the main_table
run_table_name = project_name + '_run_table'
c.execute("SELECT run_name FROM {}".format(run_table_name))
run_names = np.array(c.fetchall()).squeeze(axis=1)
# remove one run at a time
for run in run_names:
drop_run(project_name, run)
c.execute("DROP TABLE IF EXISTS {}".format(run_table_name))
# Remove the project row from main table
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} project deleted".format(project_name)) | Deletes all the tables associated with a project and removes it from the main_table
:param project_name: String, project to delete | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/sql_table_utils.py#L106-L127 | [
"def open_data_base_connection(skip_dir_check=False):\n \"\"\"\n Creates a connections to the crystal database.\n :param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.\n This is used by Crystal.py to create a new database the first time.\n False -> Raises a error if the database file is not found.\n :return: conn, c -> connection and cursor object\n \"\"\"\n if not skip_dir_check:\n assert os.path.isfile(dd.get_database_dir()), \\\n \"Database file not found in {}. \" \\\n \"Please ensure that you have written data atleast once.\".format(dd.get_database_dir())\n\n # Converts np.array to TEXT when inserting\n sqlite3.register_adapter(np.ndarray, adapt_array)\n\n # Converts TEXT to np.array when selecting\n sqlite3.register_converter(\"array\", convert_array)\n\n conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n return conn, c\n",
"def drop_run(project_name, run_name):\n \"\"\"\n Deletes a run from a desired project. If this causes the run_table to be empty then the entire project gets deleted\n :param project_name: String, project which contains the desire run_name\n :param run_name: String, run to delete\n \"\"\"\n conn, c = open_data_base_connection()\n # delete all the variable tables first\n c.execute(\"SELECT variable_name FROM {}\".format(run_name))\n try:\n all_variables = np.array(c.fetchall()).squeeze(axis=1)\n for i in all_variables:\n variable_table_name = run_name + '_' + i\n c.execute(\"\"\"DROP TABLE IF EXISTS {}\"\"\".format(variable_table_name))\n except np.core._internal.AxisError:\n print(\"Did not find any values, so deleting run table directly.\")\n\n c.execute(\"\"\"DROP TABLE IF EXISTS {}\"\"\".format(run_name))\n c.execute(\"\"\"DELETE FROM {} WHERE run_name='{}'\"\"\".format(project_name + '_run_table', run_name))\n\n # delete project if project_name+'_run_table' is empty\n c.execute(\"\"\"SELECT run_name FROM {}\"\"\".format(project_name + '_run_table'))\n all_runs = c.fetchall()\n if len(all_runs) == 0:\n c.execute(\"\"\"DROP TABLE IF EXISTS {}\"\"\".format(project_name + '_run_table'))\n c.execute(\"\"\"DELETE FROM main_table WHERE project_name='{}'\"\"\".format(project_name))\n\n conn.commit()\n print(\"{} table deleted\".format(run_name))\n"
] | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import csv
import sqlite3
import logging
import numpy as np
class DatabaseDir:
def __init__(self):
self.database_name = "crystal.db"
self.home_dir = os.path.expanduser("~")
self.main_data_dir = os.path.join(self.home_dir, "Crystal_data")
self.database_dir = os.path.join(self.main_data_dir, self.database_name)
def get_database_dir(self):
return self.database_dir
def set_database_dir(self, new_database_dir):
self.database_dir = new_database_dir
logging.info("Database dir set to: {}".format(self.database_dir))
# Get main dataset directory
dd = DatabaseDir()
def adapt_array(arr):
"""
http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
"""
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return sqlite3.Binary(out.read())
def convert_array(text):
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
# TODO: Close database connections in all functions
def open_data_base_connection(skip_dir_check=False):
"""
Creates a connections to the crystal database.
:param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.
This is used by Crystal.py to create a new database the first time.
False -> Raises a error if the database file is not found.
:return: conn, c -> connection and cursor object
"""
if not skip_dir_check:
assert os.path.isfile(dd.get_database_dir()), \
"Database file not found in {}. " \
"Please ensure that you have written data atleast once.".format(dd.get_database_dir())
# Converts np.array to TEXT when inserting
sqlite3.register_adapter(np.ndarray, adapt_array)
# Converts TEXT to np.array when selecting
sqlite3.register_converter("array", convert_array)
conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)
c = conn.cursor()
return conn, c
def drop_run(project_name, run_name):
    """Delete a run (and its variable tables) from a project.

    If removing the run empties the project's run table, the whole project is
    also removed from main_table.

    :param project_name: String, project which contains the desired run_name
    :param run_name: String, run to delete
    """
    conn, c = open_data_base_connection()
    run_table = project_name + '_run_table'
    # Drop every per-variable table recorded under this run first.
    c.execute("SELECT variable_name FROM {}".format(run_name))
    try:
        for variable in np.array(c.fetchall()).squeeze(axis=1):
            c.execute("""DROP TABLE IF EXISTS {}""".format(run_name + '_' + variable))
    except np.core._internal.AxisError:
        # No rows: np.array([]) has shape (0,), so squeeze(axis=1) raises.
        print("Did not find any values, so deleting run table directly.")

    # Remove the run's own table and its row in the project's run table.
    c.execute("""DROP TABLE IF EXISTS {}""".format(run_name))
    c.execute("""DELETE FROM {} WHERE run_name='{}'""".format(run_table, run_name))

    # When no runs remain, retire the project entirely.
    c.execute("""SELECT run_name FROM {}""".format(run_table))
    if not c.fetchall():
        c.execute("""DROP TABLE IF EXISTS {}""".format(run_table))
        c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))

    conn.commit()
    print("{} table deleted".format(run_name))
def get_latest_run():
"""
Returns the run latest run from the database.
:return: dict -> {<int_keys>: <run_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall())
latest_run_name = convert_list_to_dict(np.squeeze(run_names, 1)[-1])
conn.close()
return latest_run_name
def get_latest_project_and_runs():
"""
Returns both the latest project and runs from the database.
:return: dict -> {"latest_project": <project_name>, "latest_runs": [<run_name>]}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
conn.close()
return {"latest_project": latest_project_name, "latest_runs": run_names}
def get_figure_stats(run_table_name):
"""
Returns the latest variable names
:param run_table_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_table_name))
variable_names = convert_list_to_dict(np.array(c.fetchall()).squeeze(axis=1))
conn.close()
return variable_names
def get_latest_stats():
"""
Returns the latest run and variable names.
:return: dict -> {"latest_run": <run_name>, "variable_name": {<int_keys>: <variable_name>}}
"""
latest_run_name = get_latest_run()
variable_names = get_figure_stats(latest_run_name)
latest_stats = {'latest_run': latest_run_name, 'variable_names': variable_names}
return latest_stats
def get_projects():
"""
Returns a dict of projects present in the database.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall()).squeeze(axis=1)
project_names = convert_list_to_dict(project_names)
return project_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_runs(project_name):
"""
Returns a dict of runs present in project.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT run_name FROM {}""".format(project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
run_names = convert_list_to_dict(run_names)
return run_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variables(run_name):
"""
Returns a dict of variables in the selected run table.
:param run_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
try:
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_name))
variable_names = np.array(c.fetchall()).squeeze(axis=1)
variable_names = convert_list_to_dict(variable_names)
return variable_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variable_update_dicts(current_index, variable_names, selected_run):
"""
Query appropriate tables and return data to dashboard in the required format.
:param current_index: int, current index during update
:param variable_names: str, variable name to fetch values from
:param selected_run: str, run containing the variable
:return: dict, {<variable_name>: [<values>]}
"""
conn, c = open_data_base_connection()
data = {}
for _, v_n in variable_names:
data[v_n] = {'x': [], 'y': [], 'z': [], 'vn': []}
try:
# values for each variable
for _, v_n in variable_names:
plot_type = v_n.split("_")[0]
if plot_type == "scalar":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["y"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
elif plot_type == "heatmap":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT V_names FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
v_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
data[v_n]["vn"] = v_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except sqlite3.OperationalError:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
except KeyError:
logging.error("I think the run variable has changes. So, I'm passing no data.")
conn.close()
return data
def generate_graph_csv(variable_table_name):
"""
Generates a temporary CSV file that contains the data for the selected variable table name.
:param variable_table_name: str, variable table name
:return: str, temp CSV file path
"""
temp_csv = home_dir + "/PycharmProjects/crystal/crystal/static/temp.csv"
conn, c = open_data_base_connection()
# Get variable data
c.execute("""SELECT * FROM {}""".format(variable_table_name))
with open(temp_csv, "w", newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow([i[0] for i in c.description]) # write headers
csv_writer.writerows(c)
print("File saved: {}".format(temp_csv))
conn.close()
return temp_csv
def convert_list_to_dict(input_list):
"""
Convert a list of values into a dict with int as keys
:param input_list: list, list to convert
:return: dict -> {<int_keys>: <list_elements>}
"""
return {k: v for k, v in enumerate(input_list)}
|
Naresh1318/crystal | crystal/sql_table_utils.py | get_figure_stats | python | def get_figure_stats(run_table_name):
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_table_name))
variable_names = convert_list_to_dict(np.array(c.fetchall()).squeeze(axis=1))
conn.close()
return variable_names | Returns the latest variable names
:param run_table_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>} | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/sql_table_utils.py#L171-L184 | [
"def open_data_base_connection(skip_dir_check=False):\n \"\"\"\n Creates a connections to the crystal database.\n :param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.\n This is used by Crystal.py to create a new database the first time.\n False -> Raises a error if the database file is not found.\n :return: conn, c -> connection and cursor object\n \"\"\"\n if not skip_dir_check:\n assert os.path.isfile(dd.get_database_dir()), \\\n \"Database file not found in {}. \" \\\n \"Please ensure that you have written data atleast once.\".format(dd.get_database_dir())\n\n # Converts np.array to TEXT when inserting\n sqlite3.register_adapter(np.ndarray, adapt_array)\n\n # Converts TEXT to np.array when selecting\n sqlite3.register_converter(\"array\", convert_array)\n\n conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n return conn, c\n",
"def convert_list_to_dict(input_list):\n \"\"\"\n Convert a list of values into a dict with int as keys\n :param input_list: list, list to convert\n :return: dict -> {<int_keys>: <list_elements>}\n \"\"\"\n return {k: v for k, v in enumerate(input_list)}\n"
] | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import csv
import sqlite3
import logging
import numpy as np
class DatabaseDir:
def __init__(self):
self.database_name = "crystal.db"
self.home_dir = os.path.expanduser("~")
self.main_data_dir = os.path.join(self.home_dir, "Crystal_data")
self.database_dir = os.path.join(self.main_data_dir, self.database_name)
def get_database_dir(self):
return self.database_dir
def set_database_dir(self, new_database_dir):
self.database_dir = new_database_dir
logging.info("Database dir set to: {}".format(self.database_dir))
# Get main dataset directory
dd = DatabaseDir()
def adapt_array(arr):
"""
http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
"""
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return sqlite3.Binary(out.read())
def convert_array(text):
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
# TODO: Close database connections in all functions
def open_data_base_connection(skip_dir_check=False):
"""
Creates a connections to the crystal database.
:param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.
This is used by Crystal.py to create a new database the first time.
False -> Raises a error if the database file is not found.
:return: conn, c -> connection and cursor object
"""
if not skip_dir_check:
assert os.path.isfile(dd.get_database_dir()), \
"Database file not found in {}. " \
"Please ensure that you have written data atleast once.".format(dd.get_database_dir())
# Converts np.array to TEXT when inserting
sqlite3.register_adapter(np.ndarray, adapt_array)
# Converts TEXT to np.array when selecting
sqlite3.register_converter("array", convert_array)
conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)
c = conn.cursor()
return conn, c
def drop_run(project_name, run_name):
"""
Deletes a run from a desired project. If this causes the run_table to be empty then the entire project gets deleted
:param project_name: String, project which contains the desire run_name
:param run_name: String, run to delete
"""
conn, c = open_data_base_connection()
# delete all the variable tables first
c.execute("SELECT variable_name FROM {}".format(run_name))
try:
all_variables = np.array(c.fetchall()).squeeze(axis=1)
for i in all_variables:
variable_table_name = run_name + '_' + i
c.execute("""DROP TABLE IF EXISTS {}""".format(variable_table_name))
except np.core._internal.AxisError:
print("Did not find any values, so deleting run table directly.")
c.execute("""DROP TABLE IF EXISTS {}""".format(run_name))
c.execute("""DELETE FROM {} WHERE run_name='{}'""".format(project_name + '_run_table', run_name))
# delete project if project_name+'_run_table' is empty
c.execute("""SELECT run_name FROM {}""".format(project_name + '_run_table'))
all_runs = c.fetchall()
if len(all_runs) == 0:
c.execute("""DROP TABLE IF EXISTS {}""".format(project_name + '_run_table'))
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} table deleted".format(run_name))
def drop_project(project_name):
"""
Deletes all the tables associated with a project and removes it from the main_table
:param project_name: String, project to delete
"""
conn, c = open_data_base_connection()
# Need to delete all the run_tables before removing the project_table and the entry from the main_table
run_table_name = project_name + '_run_table'
c.execute("SELECT run_name FROM {}".format(run_table_name))
run_names = np.array(c.fetchall()).squeeze(axis=1)
# remove one run at a time
for run in run_names:
drop_run(project_name, run)
c.execute("DROP TABLE IF EXISTS {}".format(run_table_name))
# Remove the project row from main table
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} project deleted".format(project_name))
def get_latest_run():
"""
Returns the run latest run from the database.
:return: dict -> {<int_keys>: <run_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall())
latest_run_name = convert_list_to_dict(np.squeeze(run_names, 1)[-1])
conn.close()
return latest_run_name
def get_latest_project_and_runs():
    """
    Returns both the latest project and runs from the database.
    :return: dict -> {"latest_project": <project_name>, "latest_runs": [<run_name>]}
    """
    conn, cursor = open_data_base_connection()
    # The most recently created project is the last row of main_table.
    cursor.execute("""SELECT project_name FROM main_table""")
    project = np.array(cursor.fetchall())[-1][-1]
    # All runs recorded under that project live in its <project>_run_table.
    cursor.execute("""SELECT run_name FROM {}""".format(project + "_run_table"))
    runs = np.array(cursor.fetchall()).squeeze(axis=1)
    conn.close()
    return {"latest_project": project, "latest_runs": runs}
def get_latest_stats():
    """
    Returns the latest run and variable names.
    :return: dict -> {"latest_run": <run_name>, "variable_names": {<int_keys>: <variable_name>}}
    """
    run_name = get_latest_run()
    # Pair the newest run with the variables recorded for it.
    return {'latest_run': run_name, 'variable_names': get_figure_stats(run_name)}
def get_projects():
    """
    Returns a dict of projects present in the database.
    :return: dict -> {<int_keys>: <project_name>}, or None (implicitly) when
        main_table does not exist yet
    """
    conn, c = open_data_base_connection()
    try:
        c.execute("""SELECT project_name FROM main_table""")
        project_names = np.array(c.fetchall()).squeeze(axis=1)
        project_names = convert_list_to_dict(project_names)
        return project_names
    except sqlite3.OperationalError:
        # Fixed: the previous message referenced an undefined `run_name`,
        # which raised NameError inside the handler and masked the real error.
        logging.info("main_table not found")
    finally:
        conn.close()
def get_runs(project_name):
    """
    Returns a dict of runs present in project.
    :param project_name: str, project whose run table should be listed
    :return: dict -> {<int_keys>: <run_name>}, or None (implicitly) when the
        project's run table does not exist
    """
    conn, c = open_data_base_connection()
    try:
        c.execute("""SELECT run_name FROM {}""".format(project_name + "_run_table"))
        run_names = np.array(c.fetchall()).squeeze(axis=1)
        run_names = convert_list_to_dict(run_names)
        return run_names
    except sqlite3.OperationalError:
        # Fixed: the previous message referenced an undefined `run_name`
        # (NameError inside the handler); log the actual missing project.
        logging.info("{} not found".format(project_name))
    finally:
        conn.close()
def get_variables(run_name):
    """
    Returns a dict of variables in the selected run table.
    :param run_name: str, required run table name
    :return: dict -> {<int_keys>: <variable_name>}, or None (implicitly) when
        the run table does not exist
    """
    conn, c = open_data_base_connection()
    try:
        # Fetch every variable name recorded for this run.
        c.execute("""SELECT variable_name FROM {}""".format(run_name))
        variable_names = np.array(c.fetchall()).squeeze(axis=1)
        variable_names = convert_list_to_dict(variable_names)
        return variable_names
    except sqlite3.OperationalError:
        # Table does not exist -- unknown run name.
        logging.info("{} not found".format(run_name))
    finally:
        conn.close()
def get_variable_update_dicts(current_index, variable_names, selected_run):
    """
    Query appropriate tables and return data to dashboard in the required format.
    :param current_index: dict, maps variable name -> last rowid already sent to
        the dashboard; entries are advanced in place as new rows are fetched
    :param variable_names: iterable of (key, variable_name) pairs to fetch values for
    :param selected_run: str, run containing the variables
    :return: dict, {<variable_name>: {'x': [...], 'y': [...], 'z': [...], 'vn': [...]}}
    """
    conn, c = open_data_base_connection()
    # Pre-seed an empty payload for every variable so the dashboard always
    # receives a complete dict even when no new rows exist.
    data = {}
    for _, v_n in variable_names:
        data[v_n] = {'x': [], 'y': [], 'z': [], 'vn': []}
    try:
        # values for each variable
        for _, v_n in variable_names:
            # Variable names are prefixed with their plot type, e.g. "scalar_loss".
            plot_type = v_n.split("_")[0]
            if plot_type == "scalar":
                try:
                    # Only fetch rows newer than the last rowid the dashboard has seen.
                    c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                  current_index[v_n]))
                    x_values = np.array(c.fetchall()).squeeze().tolist()
                    c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                  current_index[v_n]))
                    y_values = np.array(c.fetchall()).squeeze().tolist()
                    data[v_n]["x"] = x_values
                    data[v_n]["y"] = y_values
                    # Advance the bookmark so these rows are not re-sent next poll.
                    n_values = len(x_values)
                    current_index["{}".format(v_n)] += n_values
                    logging.info("New value found and updated")
                except IndexError:
                    logging.info("No new data point found")
            elif plot_type == "heatmap":
                try:
                    c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                  current_index[v_n]))
                    x_values = np.array(c.fetchall()).squeeze().tolist()
                    c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                  current_index[v_n]))
                    y_values = np.array(c.fetchall()).squeeze().tolist()
                    c.execute("""SELECT V_names FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                  current_index[v_n]))
                    v_values = np.array(c.fetchall()).squeeze().tolist()
                    data[v_n]["x"] = x_values
                    data[v_n]["z"] = y_values
                    data[v_n]["vn"] = v_values
                    n_values = len(x_values)
                    current_index["{}".format(v_n)] += n_values
                    logging.info("New value found and updated")
                except sqlite3.OperationalError:
                    # NOTE(review): presumably the heatmap table lacks a V_names
                    # column here, so fall back to fetching X/Y only -- confirm.
                    c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                  current_index[v_n]))
                    x_values = np.array(c.fetchall()).squeeze().tolist()
                    c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                  current_index[v_n]))
                    y_values = np.array(c.fetchall()).squeeze().tolist()
                    data[v_n]["x"] = x_values
                    data[v_n]["z"] = y_values
                    n_values = len(x_values)
                    current_index["{}".format(v_n)] += n_values
                    logging.info("New value found and updated")
                except IndexError:
                    logging.info("No new data point found")
    except KeyError:
        logging.error("I think the run variable has changes. So, I'm passing no data.")
    conn.close()
    return data
def generate_graph_csv(variable_table_name):
    """
    Generates a temporary CSV file that contains the data for the selected variable table name.
    :param variable_table_name: str, variable table name
    :return: str, temp CSV file path
    """
    # Fixed: `home_dir` was never defined at module scope (it only exists as an
    # attribute of DatabaseDir), so this function raised NameError on every call.
    home_dir = os.path.expanduser("~")
    temp_csv = home_dir + "/PycharmProjects/crystal/crystal/static/temp.csv"
    conn, c = open_data_base_connection()
    # Get variable data
    c.execute("""SELECT * FROM {}""".format(variable_table_name))
    with open(temp_csv, "w", newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow([i[0] for i in c.description])  # write headers
        csv_writer.writerows(c)  # iterating the cursor streams the remaining rows
    print("File saved: {}".format(temp_csv))
    conn.close()
    return temp_csv
def convert_list_to_dict(input_list):
    """
    Convert a list of values into a dict keyed by each element's position.
    :param input_list: list, list to convert
    :return: dict -> {<int_keys>: <list_elements>}
    """
    return dict(enumerate(input_list))
|
Naresh1318/crystal | crystal/sql_table_utils.py | get_projects | python | def get_projects():
conn, c = open_data_base_connection()
try:
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall()).squeeze(axis=1)
project_names = convert_list_to_dict(project_names)
return project_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close() | Returns a dict of projects present in the database.
:return: dict -> {<int_keys>: <project_name>} | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/sql_table_utils.py#L199-L213 | [
"def open_data_base_connection(skip_dir_check=False):\n \"\"\"\n Creates a connections to the crystal database.\n :param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.\n This is used by Crystal.py to create a new database the first time.\n False -> Raises a error if the database file is not found.\n :return: conn, c -> connection and cursor object\n \"\"\"\n if not skip_dir_check:\n assert os.path.isfile(dd.get_database_dir()), \\\n \"Database file not found in {}. \" \\\n \"Please ensure that you have written data atleast once.\".format(dd.get_database_dir())\n\n # Converts np.array to TEXT when inserting\n sqlite3.register_adapter(np.ndarray, adapt_array)\n\n # Converts TEXT to np.array when selecting\n sqlite3.register_converter(\"array\", convert_array)\n\n conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n return conn, c\n",
"def convert_list_to_dict(input_list):\n \"\"\"\n Convert a list of values into a dict with int as keys\n :param input_list: list, list to convert\n :return: dict -> {<int_keys>: <list_elements>}\n \"\"\"\n return {k: v for k, v in enumerate(input_list)}\n"
] | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import csv
import sqlite3
import logging
import numpy as np
class DatabaseDir:
    """Holds and resolves the on-disk location of the crystal sqlite database."""
    def __init__(self):
        # Default location: <home>/Crystal_data/crystal.db
        self.database_name = "crystal.db"
        self.home_dir = os.path.expanduser("~")
        self.main_data_dir = os.path.join(self.home_dir, "Crystal_data")
        self.database_dir = os.path.join(self.main_data_dir, self.database_name)
    def get_database_dir(self):
        """Return the current database file path."""
        return self.database_dir
    def set_database_dir(self, new_database_dir):
        """Redirect subsequent connections to *new_database_dir*."""
        self.database_dir = new_database_dir
        logging.info("Database dir set to: {}".format(self.database_dir))
# Get main dataset directory: module-level singleton used by all connection helpers
dd = DatabaseDir()
def adapt_array(arr):
    """
    Serialize a numpy array into a sqlite3 BLOB using the .npy byte format.
    http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
    """
    buffer = io.BytesIO()
    np.save(buffer, arr)
    return sqlite3.Binary(buffer.getvalue())
def convert_array(text):
    """Deserialize a BLOB written by adapt_array back into a numpy array."""
    return np.load(io.BytesIO(text))
# TODO: Close database connections in all functions
def open_data_base_connection(skip_dir_check=False):
    """
    Creates a connections to the crystal database.
    :param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.
                           This is used by Crystal.py to create a new database the first time.
                           False -> Raises a error if the database file is not found.
    :return: conn, c -> connection and cursor object
    """
    if not skip_dir_check:
        # NOTE(review): `assert` is stripped when Python runs with -O, so this
        # validation silently disappears in optimized mode; consider raising.
        assert os.path.isfile(dd.get_database_dir()), \
            "Database file not found in {}. " \
            "Please ensure that you have written data atleast once.".format(dd.get_database_dir())
    # Converts np.array to TEXT when inserting
    sqlite3.register_adapter(np.ndarray, adapt_array)
    # Converts TEXT to np.array when selecting
    sqlite3.register_converter("array", convert_array)
    # PARSE_DECLTYPES makes sqlite3 apply the registered "array" converter to
    # columns declared with that type.
    conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)
    c = conn.cursor()
    return conn, c
def drop_run(project_name, run_name):
    """
    Deletes a run from a desired project. If this causes the run_table to be empty then the entire project gets deleted
    :param project_name: String, project which contains the desire run_name
    :param run_name: String, run to delete
    """
    conn, c = open_data_base_connection()
    # delete all the variable tables first
    c.execute("SELECT variable_name FROM {}".format(run_name))
    try:
        # squeeze(axis=1) flattens the fetched [(name,), ...] rows to a 1-D array.
        all_variables = np.array(c.fetchall()).squeeze(axis=1)
        for i in all_variables:
            variable_table_name = run_name + '_' + i
            c.execute("""DROP TABLE IF EXISTS {}""".format(variable_table_name))
    # NOTE(review): np.core._internal.AxisError is a private numpy path; newer
    # numpy exposes np.AxisError / numpy.exceptions.AxisError -- confirm the
    # pinned numpy version still provides this alias.
    except np.core._internal.AxisError:
        # Raised when the run has no variables (fetchall() returned []).
        print("Did not find any values, so deleting run table directly.")
    c.execute("""DROP TABLE IF EXISTS {}""".format(run_name))
    c.execute("""DELETE FROM {} WHERE run_name='{}'""".format(project_name + '_run_table', run_name))
    # delete project if project_name+'_run_table' is empty
    c.execute("""SELECT run_name FROM {}""".format(project_name + '_run_table'))
    all_runs = c.fetchall()
    if len(all_runs) == 0:
        c.execute("""DROP TABLE IF EXISTS {}""".format(project_name + '_run_table'))
        c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
    conn.commit()
    print("{} table deleted".format(run_name))
def drop_project(project_name):
"""
Deletes all the tables associated with a project and removes it from the main_table
:param project_name: String, project to delete
"""
conn, c = open_data_base_connection()
# Need to delete all the run_tables before removing the project_table and the entry from the main_table
run_table_name = project_name + '_run_table'
c.execute("SELECT run_name FROM {}".format(run_table_name))
run_names = np.array(c.fetchall()).squeeze(axis=1)
# remove one run at a time
for run in run_names:
drop_run(project_name, run)
c.execute("DROP TABLE IF EXISTS {}".format(run_table_name))
# Remove the project row from main table
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} project deleted".format(project_name))
def get_latest_run():
"""
Returns the run latest run from the database.
:return: dict -> {<int_keys>: <run_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall())
latest_run_name = convert_list_to_dict(np.squeeze(run_names, 1)[-1])
conn.close()
return latest_run_name
def get_latest_project_and_runs():
"""
Returns both the latest project and runs from the database.
:return: dict -> {"latest_project": <project_name>, "latest_runs": [<run_name>]}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
conn.close()
return {"latest_project": latest_project_name, "latest_runs": run_names}
def get_figure_stats(run_table_name):
    """
    Returns the latest variable names
    :param run_table_name: str, required run table name
    :return: dict -> {<int_keys>: <variable_name>}
    """
    conn, c = open_data_base_connection()
    # Fetch every variable name recorded in this run table.
    c.execute("""SELECT variable_name FROM {}""".format(run_table_name))
    variable_names = convert_list_to_dict(np.array(c.fetchall()).squeeze(axis=1))
    conn.close()
    return variable_names
def get_latest_stats():
"""
Returns the latest run and variable names.
:return: dict -> {"latest_run": <run_name>, "variable_name": {<int_keys>: <variable_name>}}
"""
latest_run_name = get_latest_run()
variable_names = get_figure_stats(latest_run_name)
latest_stats = {'latest_run': latest_run_name, 'variable_names': variable_names}
return latest_stats
def get_runs(project_name):
"""
Returns a dict of runs present in project.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT run_name FROM {}""".format(project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
run_names = convert_list_to_dict(run_names)
return run_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variables(run_name):
"""
Returns a dict of variables in the selected run table.
:param run_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
try:
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_name))
variable_names = np.array(c.fetchall()).squeeze(axis=1)
variable_names = convert_list_to_dict(variable_names)
return variable_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variable_update_dicts(current_index, variable_names, selected_run):
"""
Query appropriate tables and return data to dashboard in the required format.
:param current_index: int, current index during update
:param variable_names: str, variable name to fetch values from
:param selected_run: str, run containing the variable
:return: dict, {<variable_name>: [<values>]}
"""
conn, c = open_data_base_connection()
data = {}
for _, v_n in variable_names:
data[v_n] = {'x': [], 'y': [], 'z': [], 'vn': []}
try:
# values for each variable
for _, v_n in variable_names:
plot_type = v_n.split("_")[0]
if plot_type == "scalar":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["y"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
elif plot_type == "heatmap":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT V_names FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
v_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
data[v_n]["vn"] = v_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except sqlite3.OperationalError:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
except KeyError:
logging.error("I think the run variable has changes. So, I'm passing no data.")
conn.close()
return data
def generate_graph_csv(variable_table_name):
"""
Generates a temporary CSV file that contains the data for the selected variable table name.
:param variable_table_name: str, variable table name
:return: str, temp CSV file path
"""
temp_csv = home_dir + "/PycharmProjects/crystal/crystal/static/temp.csv"
conn, c = open_data_base_connection()
# Get variable data
c.execute("""SELECT * FROM {}""".format(variable_table_name))
with open(temp_csv, "w", newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow([i[0] for i in c.description]) # write headers
csv_writer.writerows(c)
print("File saved: {}".format(temp_csv))
conn.close()
return temp_csv
def convert_list_to_dict(input_list):
"""
Convert a list of values into a dict with int as keys
:param input_list: list, list to convert
:return: dict -> {<int_keys>: <list_elements>}
"""
return {k: v for k, v in enumerate(input_list)}
|
Naresh1318/crystal | crystal/sql_table_utils.py | get_runs | python | def get_runs(project_name):
conn, c = open_data_base_connection()
try:
c.execute("""SELECT run_name FROM {}""".format(project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
run_names = convert_list_to_dict(run_names)
return run_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close() | Returns a dict of runs present in project.
:return: dict -> {<int_keys>: <project_name>} | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/sql_table_utils.py#L216-L230 | [
"def open_data_base_connection(skip_dir_check=False):\n \"\"\"\n Creates a connections to the crystal database.\n :param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.\n This is used by Crystal.py to create a new database the first time.\n False -> Raises a error if the database file is not found.\n :return: conn, c -> connection and cursor object\n \"\"\"\n if not skip_dir_check:\n assert os.path.isfile(dd.get_database_dir()), \\\n \"Database file not found in {}. \" \\\n \"Please ensure that you have written data atleast once.\".format(dd.get_database_dir())\n\n # Converts np.array to TEXT when inserting\n sqlite3.register_adapter(np.ndarray, adapt_array)\n\n # Converts TEXT to np.array when selecting\n sqlite3.register_converter(\"array\", convert_array)\n\n conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n return conn, c\n",
"def convert_list_to_dict(input_list):\n \"\"\"\n Convert a list of values into a dict with int as keys\n :param input_list: list, list to convert\n :return: dict -> {<int_keys>: <list_elements>}\n \"\"\"\n return {k: v for k, v in enumerate(input_list)}\n"
] | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import csv
import sqlite3
import logging
import numpy as np
class DatabaseDir:
def __init__(self):
self.database_name = "crystal.db"
self.home_dir = os.path.expanduser("~")
self.main_data_dir = os.path.join(self.home_dir, "Crystal_data")
self.database_dir = os.path.join(self.main_data_dir, self.database_name)
def get_database_dir(self):
return self.database_dir
def set_database_dir(self, new_database_dir):
self.database_dir = new_database_dir
logging.info("Database dir set to: {}".format(self.database_dir))
# Get main dataset directory
dd = DatabaseDir()
def adapt_array(arr):
"""
http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
"""
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return sqlite3.Binary(out.read())
def convert_array(text):
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
# TODO: Close database connections in all functions
def open_data_base_connection(skip_dir_check=False):
"""
Creates a connections to the crystal database.
:param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.
This is used by Crystal.py to create a new database the first time.
False -> Raises a error if the database file is not found.
:return: conn, c -> connection and cursor object
"""
if not skip_dir_check:
assert os.path.isfile(dd.get_database_dir()), \
"Database file not found in {}. " \
"Please ensure that you have written data atleast once.".format(dd.get_database_dir())
# Converts np.array to TEXT when inserting
sqlite3.register_adapter(np.ndarray, adapt_array)
# Converts TEXT to np.array when selecting
sqlite3.register_converter("array", convert_array)
conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)
c = conn.cursor()
return conn, c
def drop_run(project_name, run_name):
"""
Deletes a run from a desired project. If this causes the run_table to be empty then the entire project gets deleted
:param project_name: String, project which contains the desire run_name
:param run_name: String, run to delete
"""
conn, c = open_data_base_connection()
# delete all the variable tables first
c.execute("SELECT variable_name FROM {}".format(run_name))
try:
all_variables = np.array(c.fetchall()).squeeze(axis=1)
for i in all_variables:
variable_table_name = run_name + '_' + i
c.execute("""DROP TABLE IF EXISTS {}""".format(variable_table_name))
except np.core._internal.AxisError:
print("Did not find any values, so deleting run table directly.")
c.execute("""DROP TABLE IF EXISTS {}""".format(run_name))
c.execute("""DELETE FROM {} WHERE run_name='{}'""".format(project_name + '_run_table', run_name))
# delete project if project_name+'_run_table' is empty
c.execute("""SELECT run_name FROM {}""".format(project_name + '_run_table'))
all_runs = c.fetchall()
if len(all_runs) == 0:
c.execute("""DROP TABLE IF EXISTS {}""".format(project_name + '_run_table'))
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} table deleted".format(run_name))
def drop_project(project_name):
"""
Deletes all the tables associated with a project and removes it from the main_table
:param project_name: String, project to delete
"""
conn, c = open_data_base_connection()
# Need to delete all the run_tables before removing the project_table and the entry from the main_table
run_table_name = project_name + '_run_table'
c.execute("SELECT run_name FROM {}".format(run_table_name))
run_names = np.array(c.fetchall()).squeeze(axis=1)
# remove one run at a time
for run in run_names:
drop_run(project_name, run)
c.execute("DROP TABLE IF EXISTS {}".format(run_table_name))
# Remove the project row from main table
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} project deleted".format(project_name))
def get_latest_run():
"""
Returns the run latest run from the database.
:return: dict -> {<int_keys>: <run_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall())
latest_run_name = convert_list_to_dict(np.squeeze(run_names, 1)[-1])
conn.close()
return latest_run_name
def get_latest_project_and_runs():
"""
Returns both the latest project and runs from the database.
:return: dict -> {"latest_project": <project_name>, "latest_runs": [<run_name>]}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
conn.close()
return {"latest_project": latest_project_name, "latest_runs": run_names}
def get_figure_stats(run_table_name):
"""
Returns the latest variable names
:param run_table_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_table_name))
variable_names = convert_list_to_dict(np.array(c.fetchall()).squeeze(axis=1))
conn.close()
return variable_names
def get_latest_stats():
"""
Returns the latest run and variable names.
:return: dict -> {"latest_run": <run_name>, "variable_name": {<int_keys>: <variable_name>}}
"""
latest_run_name = get_latest_run()
variable_names = get_figure_stats(latest_run_name)
latest_stats = {'latest_run': latest_run_name, 'variable_names': variable_names}
return latest_stats
def get_projects():
"""
Returns a dict of projects present in the database.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall()).squeeze(axis=1)
project_names = convert_list_to_dict(project_names)
return project_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variables(run_name):
"""
Returns a dict of variables in the selected run table.
:param run_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
try:
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_name))
variable_names = np.array(c.fetchall()).squeeze(axis=1)
variable_names = convert_list_to_dict(variable_names)
return variable_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variable_update_dicts(current_index, variable_names, selected_run):
"""
Query appropriate tables and return data to dashboard in the required format.
:param current_index: int, current index during update
:param variable_names: str, variable name to fetch values from
:param selected_run: str, run containing the variable
:return: dict, {<variable_name>: [<values>]}
"""
conn, c = open_data_base_connection()
data = {}
for _, v_n in variable_names:
data[v_n] = {'x': [], 'y': [], 'z': [], 'vn': []}
try:
# values for each variable
for _, v_n in variable_names:
plot_type = v_n.split("_")[0]
if plot_type == "scalar":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["y"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
elif plot_type == "heatmap":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT V_names FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
v_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
data[v_n]["vn"] = v_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except sqlite3.OperationalError:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
except KeyError:
logging.error("I think the run variable has changes. So, I'm passing no data.")
conn.close()
return data
def generate_graph_csv(variable_table_name):
"""
Generates a temporary CSV file that contains the data for the selected variable table name.
:param variable_table_name: str, variable table name
:return: str, temp CSV file path
"""
temp_csv = home_dir + "/PycharmProjects/crystal/crystal/static/temp.csv"
conn, c = open_data_base_connection()
# Get variable data
c.execute("""SELECT * FROM {}""".format(variable_table_name))
with open(temp_csv, "w", newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow([i[0] for i in c.description]) # write headers
csv_writer.writerows(c)
print("File saved: {}".format(temp_csv))
conn.close()
return temp_csv
def convert_list_to_dict(input_list):
"""
Convert a list of values into a dict with int as keys
:param input_list: list, list to convert
:return: dict -> {<int_keys>: <list_elements>}
"""
return {k: v for k, v in enumerate(input_list)}
|
Naresh1318/crystal | crystal/sql_table_utils.py | get_variables | python | def get_variables(run_name):
conn, c = open_data_base_connection()
try:
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_name))
variable_names = np.array(c.fetchall()).squeeze(axis=1)
variable_names = convert_list_to_dict(variable_names)
return variable_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close() | Returns a dict of variables in the selected run table.
:param run_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>} | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/sql_table_utils.py#L233-L249 | [
"def open_data_base_connection(skip_dir_check=False):\n \"\"\"\n Creates a connections to the crystal database.\n :param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.\n This is used by Crystal.py to create a new database the first time.\n False -> Raises a error if the database file is not found.\n :return: conn, c -> connection and cursor object\n \"\"\"\n if not skip_dir_check:\n assert os.path.isfile(dd.get_database_dir()), \\\n \"Database file not found in {}. \" \\\n \"Please ensure that you have written data atleast once.\".format(dd.get_database_dir())\n\n # Converts np.array to TEXT when inserting\n sqlite3.register_adapter(np.ndarray, adapt_array)\n\n # Converts TEXT to np.array when selecting\n sqlite3.register_converter(\"array\", convert_array)\n\n conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n return conn, c\n",
"def convert_list_to_dict(input_list):\n \"\"\"\n Convert a list of values into a dict with int as keys\n :param input_list: list, list to convert\n :return: dict -> {<int_keys>: <list_elements>}\n \"\"\"\n return {k: v for k, v in enumerate(input_list)}\n"
] | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import csv
import sqlite3
import logging
import numpy as np
class DatabaseDir:
def __init__(self):
self.database_name = "crystal.db"
self.home_dir = os.path.expanduser("~")
self.main_data_dir = os.path.join(self.home_dir, "Crystal_data")
self.database_dir = os.path.join(self.main_data_dir, self.database_name)
def get_database_dir(self):
return self.database_dir
def set_database_dir(self, new_database_dir):
self.database_dir = new_database_dir
logging.info("Database dir set to: {}".format(self.database_dir))
# Get main dataset directory
dd = DatabaseDir()
def adapt_array(arr):
"""
http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
"""
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return sqlite3.Binary(out.read())
def convert_array(text):
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
# TODO: Close database connections in all functions
def open_data_base_connection(skip_dir_check=False):
"""
Creates a connections to the crystal database.
:param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.
This is used by Crystal.py to create a new database the first time.
False -> Raises a error if the database file is not found.
:return: conn, c -> connection and cursor object
"""
if not skip_dir_check:
assert os.path.isfile(dd.get_database_dir()), \
"Database file not found in {}. " \
"Please ensure that you have written data atleast once.".format(dd.get_database_dir())
# Converts np.array to TEXT when inserting
sqlite3.register_adapter(np.ndarray, adapt_array)
# Converts TEXT to np.array when selecting
sqlite3.register_converter("array", convert_array)
conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)
c = conn.cursor()
return conn, c
def drop_run(project_name, run_name):
"""
Deletes a run from a desired project. If this causes the run_table to be empty then the entire project gets deleted
:param project_name: String, project which contains the desire run_name
:param run_name: String, run to delete
"""
conn, c = open_data_base_connection()
# delete all the variable tables first
c.execute("SELECT variable_name FROM {}".format(run_name))
try:
all_variables = np.array(c.fetchall()).squeeze(axis=1)
for i in all_variables:
variable_table_name = run_name + '_' + i
c.execute("""DROP TABLE IF EXISTS {}""".format(variable_table_name))
except np.core._internal.AxisError:
print("Did not find any values, so deleting run table directly.")
c.execute("""DROP TABLE IF EXISTS {}""".format(run_name))
c.execute("""DELETE FROM {} WHERE run_name='{}'""".format(project_name + '_run_table', run_name))
# delete project if project_name+'_run_table' is empty
c.execute("""SELECT run_name FROM {}""".format(project_name + '_run_table'))
all_runs = c.fetchall()
if len(all_runs) == 0:
c.execute("""DROP TABLE IF EXISTS {}""".format(project_name + '_run_table'))
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} table deleted".format(run_name))
def drop_project(project_name):
"""
Deletes all the tables associated with a project and removes it from the main_table
:param project_name: String, project to delete
"""
conn, c = open_data_base_connection()
# Need to delete all the run_tables before removing the project_table and the entry from the main_table
run_table_name = project_name + '_run_table'
c.execute("SELECT run_name FROM {}".format(run_table_name))
run_names = np.array(c.fetchall()).squeeze(axis=1)
# remove one run at a time
for run in run_names:
drop_run(project_name, run)
c.execute("DROP TABLE IF EXISTS {}".format(run_table_name))
# Remove the project row from main table
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} project deleted".format(project_name))
def get_latest_run():
"""
Returns the run latest run from the database.
:return: dict -> {<int_keys>: <run_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall())
latest_run_name = convert_list_to_dict(np.squeeze(run_names, 1)[-1])
conn.close()
return latest_run_name
def get_latest_project_and_runs():
"""
Returns both the latest project and runs from the database.
:return: dict -> {"latest_project": <project_name>, "latest_runs": [<run_name>]}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
conn.close()
return {"latest_project": latest_project_name, "latest_runs": run_names}
def get_figure_stats(run_table_name):
"""
Returns the latest variable names
:param run_table_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_table_name))
variable_names = convert_list_to_dict(np.array(c.fetchall()).squeeze(axis=1))
conn.close()
return variable_names
def get_latest_stats():
"""
Returns the latest run and variable names.
:return: dict -> {"latest_run": <run_name>, "variable_name": {<int_keys>: <variable_name>}}
"""
latest_run_name = get_latest_run()
variable_names = get_figure_stats(latest_run_name)
latest_stats = {'latest_run': latest_run_name, 'variable_names': variable_names}
return latest_stats
def get_projects():
"""
Returns a dict of projects present in the database.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall()).squeeze(axis=1)
project_names = convert_list_to_dict(project_names)
return project_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_runs(project_name):
"""
Returns a dict of runs present in project.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT run_name FROM {}""".format(project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
run_names = convert_list_to_dict(run_names)
return run_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variable_update_dicts(current_index, variable_names, selected_run):
"""
Query appropriate tables and return data to dashboard in the required format.
:param current_index: int, current index during update
:param variable_names: str, variable name to fetch values from
:param selected_run: str, run containing the variable
:return: dict, {<variable_name>: [<values>]}
"""
conn, c = open_data_base_connection()
data = {}
for _, v_n in variable_names:
data[v_n] = {'x': [], 'y': [], 'z': [], 'vn': []}
try:
# values for each variable
for _, v_n in variable_names:
plot_type = v_n.split("_")[0]
if plot_type == "scalar":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["y"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
elif plot_type == "heatmap":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT V_names FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
v_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
data[v_n]["vn"] = v_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except sqlite3.OperationalError:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
except KeyError:
logging.error("I think the run variable has changes. So, I'm passing no data.")
conn.close()
return data
def generate_graph_csv(variable_table_name):
"""
Generates a temporary CSV file that contains the data for the selected variable table name.
:param variable_table_name: str, variable table name
:return: str, temp CSV file path
"""
temp_csv = home_dir + "/PycharmProjects/crystal/crystal/static/temp.csv"
conn, c = open_data_base_connection()
# Get variable data
c.execute("""SELECT * FROM {}""".format(variable_table_name))
with open(temp_csv, "w", newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow([i[0] for i in c.description]) # write headers
csv_writer.writerows(c)
print("File saved: {}".format(temp_csv))
conn.close()
return temp_csv
def convert_list_to_dict(input_list):
"""
Convert a list of values into a dict with int as keys
:param input_list: list, list to convert
:return: dict -> {<int_keys>: <list_elements>}
"""
return {k: v for k, v in enumerate(input_list)}
|
Naresh1318/crystal | crystal/sql_table_utils.py | get_variable_update_dicts | python | def get_variable_update_dicts(current_index, variable_names, selected_run):
conn, c = open_data_base_connection()
data = {}
for _, v_n in variable_names:
data[v_n] = {'x': [], 'y': [], 'z': [], 'vn': []}
try:
# values for each variable
for _, v_n in variable_names:
plot_type = v_n.split("_")[0]
if plot_type == "scalar":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["y"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
elif plot_type == "heatmap":
try:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT V_names FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
v_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
data[v_n]["vn"] = v_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except sqlite3.OperationalError:
c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
x_values = np.array(c.fetchall()).squeeze().tolist()
c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
current_index[v_n]))
y_values = np.array(c.fetchall()).squeeze().tolist()
data[v_n]["x"] = x_values
data[v_n]["z"] = y_values
n_values = len(x_values)
current_index["{}".format(v_n)] += n_values
logging.info("New value found and updated")
except IndexError:
logging.info("No new data point found")
except KeyError:
logging.error("I think the run variable has changes. So, I'm passing no data.")
conn.close()
return data | Query appropriate tables and return data to dashboard in the required format.
:param current_index: int, current index during update
:param variable_names: str, variable name to fetch values from
:param selected_run: str, run containing the variable
:return: dict, {<variable_name>: [<values>]} | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/sql_table_utils.py#L252-L319 | [
"def open_data_base_connection(skip_dir_check=False):\n \"\"\"\n Creates a connections to the crystal database.\n :param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.\n This is used by Crystal.py to create a new database the first time.\n False -> Raises a error if the database file is not found.\n :return: conn, c -> connection and cursor object\n \"\"\"\n if not skip_dir_check:\n assert os.path.isfile(dd.get_database_dir()), \\\n \"Database file not found in {}. \" \\\n \"Please ensure that you have written data atleast once.\".format(dd.get_database_dir())\n\n # Converts np.array to TEXT when inserting\n sqlite3.register_adapter(np.ndarray, adapt_array)\n\n # Converts TEXT to np.array when selecting\n sqlite3.register_converter(\"array\", convert_array)\n\n conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n return conn, c\n"
] | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import csv
import sqlite3
import logging
import numpy as np
class DatabaseDir:
def __init__(self):
self.database_name = "crystal.db"
self.home_dir = os.path.expanduser("~")
self.main_data_dir = os.path.join(self.home_dir, "Crystal_data")
self.database_dir = os.path.join(self.main_data_dir, self.database_name)
def get_database_dir(self):
return self.database_dir
def set_database_dir(self, new_database_dir):
self.database_dir = new_database_dir
logging.info("Database dir set to: {}".format(self.database_dir))
# Get main dataset directory
dd = DatabaseDir()
def adapt_array(arr):
"""
http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
"""
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return sqlite3.Binary(out.read())
def convert_array(text):
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
# TODO: Close database connections in all functions
def open_data_base_connection(skip_dir_check=False):
"""
Creates a connections to the crystal database.
:param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.
This is used by Crystal.py to create a new database the first time.
False -> Raises a error if the database file is not found.
:return: conn, c -> connection and cursor object
"""
if not skip_dir_check:
assert os.path.isfile(dd.get_database_dir()), \
"Database file not found in {}. " \
"Please ensure that you have written data atleast once.".format(dd.get_database_dir())
# Converts np.array to TEXT when inserting
sqlite3.register_adapter(np.ndarray, adapt_array)
# Converts TEXT to np.array when selecting
sqlite3.register_converter("array", convert_array)
conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)
c = conn.cursor()
return conn, c
def drop_run(project_name, run_name):
"""
Deletes a run from a desired project. If this causes the run_table to be empty then the entire project gets deleted
:param project_name: String, project which contains the desire run_name
:param run_name: String, run to delete
"""
conn, c = open_data_base_connection()
# delete all the variable tables first
c.execute("SELECT variable_name FROM {}".format(run_name))
try:
all_variables = np.array(c.fetchall()).squeeze(axis=1)
for i in all_variables:
variable_table_name = run_name + '_' + i
c.execute("""DROP TABLE IF EXISTS {}""".format(variable_table_name))
except np.core._internal.AxisError:
print("Did not find any values, so deleting run table directly.")
c.execute("""DROP TABLE IF EXISTS {}""".format(run_name))
c.execute("""DELETE FROM {} WHERE run_name='{}'""".format(project_name + '_run_table', run_name))
# delete project if project_name+'_run_table' is empty
c.execute("""SELECT run_name FROM {}""".format(project_name + '_run_table'))
all_runs = c.fetchall()
if len(all_runs) == 0:
c.execute("""DROP TABLE IF EXISTS {}""".format(project_name + '_run_table'))
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} table deleted".format(run_name))
def drop_project(project_name):
"""
Deletes all the tables associated with a project and removes it from the main_table
:param project_name: String, project to delete
"""
conn, c = open_data_base_connection()
# Need to delete all the run_tables before removing the project_table and the entry from the main_table
run_table_name = project_name + '_run_table'
c.execute("SELECT run_name FROM {}".format(run_table_name))
run_names = np.array(c.fetchall()).squeeze(axis=1)
# remove one run at a time
for run in run_names:
drop_run(project_name, run)
c.execute("DROP TABLE IF EXISTS {}".format(run_table_name))
# Remove the project row from main table
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} project deleted".format(project_name))
def get_latest_run():
"""
Returns the run latest run from the database.
:return: dict -> {<int_keys>: <run_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall())
latest_run_name = convert_list_to_dict(np.squeeze(run_names, 1)[-1])
conn.close()
return latest_run_name
def get_latest_project_and_runs():
"""
Returns both the latest project and runs from the database.
:return: dict -> {"latest_project": <project_name>, "latest_runs": [<run_name>]}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
conn.close()
return {"latest_project": latest_project_name, "latest_runs": run_names}
def get_figure_stats(run_table_name):
"""
Returns the latest variable names
:param run_table_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_table_name))
variable_names = convert_list_to_dict(np.array(c.fetchall()).squeeze(axis=1))
conn.close()
return variable_names
def get_latest_stats():
"""
Returns the latest run and variable names.
:return: dict -> {"latest_run": <run_name>, "variable_name": {<int_keys>: <variable_name>}}
"""
latest_run_name = get_latest_run()
variable_names = get_figure_stats(latest_run_name)
latest_stats = {'latest_run': latest_run_name, 'variable_names': variable_names}
return latest_stats
def get_projects():
"""
Returns a dict of projects present in the database.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall()).squeeze(axis=1)
project_names = convert_list_to_dict(project_names)
return project_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_runs(project_name):
"""
Returns a dict of runs present in project.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT run_name FROM {}""".format(project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
run_names = convert_list_to_dict(run_names)
return run_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variables(run_name):
"""
Returns a dict of variables in the selected run table.
:param run_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
try:
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_name))
variable_names = np.array(c.fetchall()).squeeze(axis=1)
variable_names = convert_list_to_dict(variable_names)
return variable_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def generate_graph_csv(variable_table_name):
"""
Generates a temporary CSV file that contains the data for the selected variable table name.
:param variable_table_name: str, variable table name
:return: str, temp CSV file path
"""
temp_csv = home_dir + "/PycharmProjects/crystal/crystal/static/temp.csv"
conn, c = open_data_base_connection()
# Get variable data
c.execute("""SELECT * FROM {}""".format(variable_table_name))
with open(temp_csv, "w", newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow([i[0] for i in c.description]) # write headers
csv_writer.writerows(c)
print("File saved: {}".format(temp_csv))
conn.close()
return temp_csv
def convert_list_to_dict(input_list):
"""
Convert a list of values into a dict with int as keys
:param input_list: list, list to convert
:return: dict -> {<int_keys>: <list_elements>}
"""
return {k: v for k, v in enumerate(input_list)}
|
Naresh1318/crystal | crystal/sql_table_utils.py | generate_graph_csv | python | def generate_graph_csv(variable_table_name):
temp_csv = home_dir + "/PycharmProjects/crystal/crystal/static/temp.csv"
conn, c = open_data_base_connection()
# Get variable data
c.execute("""SELECT * FROM {}""".format(variable_table_name))
with open(temp_csv, "w", newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow([i[0] for i in c.description]) # write headers
csv_writer.writerows(c)
print("File saved: {}".format(temp_csv))
conn.close()
return temp_csv | Generates a temporary CSV file that contains the data for the selected variable table name.
:param variable_table_name: str, variable table name
:return: str, temp CSV file path | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/sql_table_utils.py#L322-L340 | [
"def open_data_base_connection(skip_dir_check=False):\n \"\"\"\n Creates a connections to the crystal database.\n :param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.\n This is used by Crystal.py to create a new database the first time.\n False -> Raises a error if the database file is not found.\n :return: conn, c -> connection and cursor object\n \"\"\"\n if not skip_dir_check:\n assert os.path.isfile(dd.get_database_dir()), \\\n \"Database file not found in {}. \" \\\n \"Please ensure that you have written data atleast once.\".format(dd.get_database_dir())\n\n # Converts np.array to TEXT when inserting\n sqlite3.register_adapter(np.ndarray, adapt_array)\n\n # Converts TEXT to np.array when selecting\n sqlite3.register_converter(\"array\", convert_array)\n\n conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n return conn, c\n"
] | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import csv
import sqlite3
import logging
import numpy as np
class DatabaseDir:
def __init__(self):
self.database_name = "crystal.db"
self.home_dir = os.path.expanduser("~")
self.main_data_dir = os.path.join(self.home_dir, "Crystal_data")
self.database_dir = os.path.join(self.main_data_dir, self.database_name)
def get_database_dir(self):
return self.database_dir
def set_database_dir(self, new_database_dir):
self.database_dir = new_database_dir
logging.info("Database dir set to: {}".format(self.database_dir))
# Get main dataset directory
dd = DatabaseDir()
def adapt_array(arr):
"""
http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
"""
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return sqlite3.Binary(out.read())
def convert_array(text):
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
# TODO: Close database connections in all functions
def open_data_base_connection(skip_dir_check=False):
"""
Creates a connections to the crystal database.
:param skip_dir_check: bool, True -> Skips checking database file and creates a new one if not present.
This is used by Crystal.py to create a new database the first time.
False -> Raises a error if the database file is not found.
:return: conn, c -> connection and cursor object
"""
if not skip_dir_check:
assert os.path.isfile(dd.get_database_dir()), \
"Database file not found in {}. " \
"Please ensure that you have written data atleast once.".format(dd.get_database_dir())
# Converts np.array to TEXT when inserting
sqlite3.register_adapter(np.ndarray, adapt_array)
# Converts TEXT to np.array when selecting
sqlite3.register_converter("array", convert_array)
conn = sqlite3.connect(dd.get_database_dir(), detect_types=sqlite3.PARSE_DECLTYPES)
c = conn.cursor()
return conn, c
def drop_run(project_name, run_name):
"""
Deletes a run from a desired project. If this causes the run_table to be empty then the entire project gets deleted
:param project_name: String, project which contains the desire run_name
:param run_name: String, run to delete
"""
conn, c = open_data_base_connection()
# delete all the variable tables first
c.execute("SELECT variable_name FROM {}".format(run_name))
try:
all_variables = np.array(c.fetchall()).squeeze(axis=1)
for i in all_variables:
variable_table_name = run_name + '_' + i
c.execute("""DROP TABLE IF EXISTS {}""".format(variable_table_name))
except np.core._internal.AxisError:
print("Did not find any values, so deleting run table directly.")
c.execute("""DROP TABLE IF EXISTS {}""".format(run_name))
c.execute("""DELETE FROM {} WHERE run_name='{}'""".format(project_name + '_run_table', run_name))
# delete project if project_name+'_run_table' is empty
c.execute("""SELECT run_name FROM {}""".format(project_name + '_run_table'))
all_runs = c.fetchall()
if len(all_runs) == 0:
c.execute("""DROP TABLE IF EXISTS {}""".format(project_name + '_run_table'))
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} table deleted".format(run_name))
def drop_project(project_name):
"""
Deletes all the tables associated with a project and removes it from the main_table
:param project_name: String, project to delete
"""
conn, c = open_data_base_connection()
# Need to delete all the run_tables before removing the project_table and the entry from the main_table
run_table_name = project_name + '_run_table'
c.execute("SELECT run_name FROM {}".format(run_table_name))
run_names = np.array(c.fetchall()).squeeze(axis=1)
# remove one run at a time
for run in run_names:
drop_run(project_name, run)
c.execute("DROP TABLE IF EXISTS {}".format(run_table_name))
# Remove the project row from main table
c.execute("""DELETE FROM main_table WHERE project_name='{}'""".format(project_name))
conn.commit()
print("{} project deleted".format(project_name))
def get_latest_run():
"""
Returns the run latest run from the database.
:return: dict -> {<int_keys>: <run_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall())
latest_run_name = convert_list_to_dict(np.squeeze(run_names, 1)[-1])
conn.close()
return latest_run_name
def get_latest_project_and_runs():
"""
Returns both the latest project and runs from the database.
:return: dict -> {"latest_project": <project_name>, "latest_runs": [<run_name>]}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall())
latest_project_name = project_names[-1][-1]
# Get latest run
c.execute("""SELECT run_name FROM {}""".format(latest_project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
conn.close()
return {"latest_project": latest_project_name, "latest_runs": run_names}
def get_figure_stats(run_table_name):
"""
Returns the latest variable names
:param run_table_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_table_name))
variable_names = convert_list_to_dict(np.array(c.fetchall()).squeeze(axis=1))
conn.close()
return variable_names
def get_latest_stats():
"""
Returns the latest run and variable names.
:return: dict -> {"latest_run": <run_name>, "variable_name": {<int_keys>: <variable_name>}}
"""
latest_run_name = get_latest_run()
variable_names = get_figure_stats(latest_run_name)
latest_stats = {'latest_run': latest_run_name, 'variable_names': variable_names}
return latest_stats
def get_projects():
"""
Returns a dict of projects present in the database.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(c.fetchall()).squeeze(axis=1)
project_names = convert_list_to_dict(project_names)
return project_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_runs(project_name):
"""
Returns a dict of runs present in project.
:return: dict -> {<int_keys>: <project_name>}
"""
conn, c = open_data_base_connection()
try:
c.execute("""SELECT run_name FROM {}""".format(project_name + "_run_table"))
run_names = np.array(c.fetchall()).squeeze(axis=1)
run_names = convert_list_to_dict(run_names)
return run_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variables(run_name):
"""
Returns a dict of variables in the selected run table.
:param run_name: str, required run table name
:return: dict -> {<int_keys>: <variable_name>}
"""
conn, c = open_data_base_connection()
try:
# Get latest project
c.execute("""SELECT variable_name FROM {}""".format(run_name))
variable_names = np.array(c.fetchall()).squeeze(axis=1)
variable_names = convert_list_to_dict(variable_names)
return variable_names
except sqlite3.OperationalError:
logging.info("{} not found".format(run_name))
finally:
conn.close()
def get_variable_update_dicts(current_index, variable_names, selected_run):
    """
    Query the per-variable tables of *selected_run* and collect every row
    added since the previous poll, shaped for the dashboard client.

    :param current_index: dict, {<variable_name>: <rows already sent>};
        mutated in place — each entry is advanced by the number of new
        rows fetched for that variable
    :param variable_names: iterable of (key, variable_name) pairs, e.g.
        ``get_variables(selected_run).items()``
    :param selected_run: str, run table name; each variable's data lives
        in a table named "<selected_run>_<variable_name>"
    :return: dict, {<variable_name>: {'x': [...], 'y': [...], 'z': [...],
        'vn': [...]}} — scalar plots fill 'x'/'y'; heatmaps fill 'x'/'z'
        (plus 'vn' when the table has a V_names column)
    """
    conn, c = open_data_base_connection()
    data = {}
    # Pre-seed every variable with empty series so the payload is complete
    # even when no new rows exist for some variables.
    for _, v_n in variable_names:
        data[v_n] = {'x': [], 'y': [], 'z': [], 'vn': []}

    try:
        # values for each variable
        for _, v_n in variable_names:
            # Variable names carry their plot type as prefix, e.g.
            # "scalar_loss" or "heatmap_weights".
            plot_type = v_n.split("_")[0]
            if plot_type == "scalar":
                try:
                    # NOTE(review): table names are interpolated into SQL;
                    # safe only while names originate from this database.
                    # rowid > current_index fetches only unseen rows.
                    c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    # NOTE(review): squeeze() of a single new row yields a
                    # 0-d array -> scalar, so len() below would raise
                    # TypeError, not the caught IndexError — TODO confirm.
                    x_values = np.array(c.fetchall()).squeeze().tolist()
                    c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    y_values = np.array(c.fetchall()).squeeze().tolist()
                    data[v_n]["x"] = x_values
                    data[v_n]["y"] = y_values
                    n_values = len(x_values)
                    # Advance the cursor so these rows are not re-sent.
                    current_index["{}".format(v_n)] += n_values
                    logging.info("New value found and updated")
                except IndexError:
                    logging.info("No new data point found")
            elif plot_type == "heatmap":
                try:
                    c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    x_values = np.array(c.fetchall()).squeeze().tolist()
                    c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    y_values = np.array(c.fetchall()).squeeze().tolist()
                    c.execute("""SELECT V_names FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    v_values = np.array(c.fetchall()).squeeze().tolist()
                    data[v_n]["x"] = x_values
                    data[v_n]["z"] = y_values
                    data[v_n]["vn"] = v_values
                    n_values = len(x_values)
                    current_index["{}".format(v_n)] += n_values
                    logging.info("New value found and updated")
                except sqlite3.OperationalError:
                    # Fallback for heatmap tables created without a
                    # V_names column (see Crystal.heatmap with
                    # value_names=None): re-query X/Y only.
                    c.execute("""SELECT X_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    x_values = np.array(c.fetchall()).squeeze().tolist()
                    c.execute("""SELECT Y_value FROM {} WHERE rowid > {}""".format(selected_run + "_" + v_n,
                                                                                   current_index[v_n]))
                    y_values = np.array(c.fetchall()).squeeze().tolist()
                    data[v_n]["x"] = x_values
                    data[v_n]["z"] = y_values
                    n_values = len(x_values)
                    current_index["{}".format(v_n)] += n_values
                    logging.info("New value found and updated")
                except IndexError:
                    logging.info("No new data point found")
    except KeyError:
        # A variable present in variable_names but missing from
        # current_index (e.g. the run changed mid-poll): send nothing.
        logging.error("I think the run variable has changes. So, I'm passing no data.")

    # NOTE(review): conn is not closed if an uncaught exception escapes
    # the try above — consider a finally block.
    conn.close()
    return data
def convert_list_to_dict(input_list):
    """
    Map each element of *input_list* to its positional index.

    :param input_list: list, values to wrap
    :return: dict -> {<int_keys>: <list_elements>}
    """
    return dict(enumerate(input_list))
|
Naresh1318/crystal | crystal/Crystal.py | get_valid_time_stamp | python | def get_valid_time_stamp():
time_stamp = str(datetime.datetime.now())
time_stamp = "time_" + time_stamp.replace("-", "_").replace(":", "_").replace(" ", "_").replace(".", "_")
return time_stamp | Get a valid time stamp without illegal characters.
Adds time_ to make the time stamp a valid table name in sql.
:return: String, extracted timestamp | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/Crystal.py#L17-L25 | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import sqlite3
import datetime
import numpy as np
from . import sql_table_utils as utils
DEFAULT_DATABASE_DIR_NAME = "Crystal_data"
class Crystal:
"""
Provides methods to store various types of data onto the database.
docs:
* Creates a new project using the script name if no project name has been provided.
* Creates a new run table for every class instantiation.
"""
def __init__(self, project_name=None, database_dir=None):
"""
Create a crystal instance that could be used to write data onto the database.
:param project_name: str, default -> None, uses the script name the instance is being used from as the
project name.
-> str, uses this name instead.
"""
if project_name is None:
self.called_from = os.path.realpath(sys.argv[0])
self.project_name = os.path.basename(self.called_from)[:-3] # Remove .py
self.project_name = self.project_name.split(".")[0]
else:
# Spaces not allowed for project name
assert len(project_name.split(" ")) < 2, \
"Ensure that you don't have spaces in your variable name, use '_' instead."
self.project_name = project_name
self.time_stamp = get_valid_time_stamp()
self.previous = [None]
if database_dir is None:
# Create a new database on the home directory if not present
home_dir = os.path.expanduser("~")
main_data_dir = os.path.join(home_dir, DEFAULT_DATABASE_DIR_NAME)
if not os.path.exists(main_data_dir):
print("Crystal_data directory not found. Creating a new one...")
os.mkdir(main_data_dir)
else:
utils.dd.set_database_dir(new_database_dir=database_dir)
# Create new project and run tables if not already found
self.conn, self.c = utils.open_data_base_connection(skip_dir_check=True)
self.run_table_name = self.project_name + '_' + 'run_table'
self.c.execute("""CREATE TABLE IF NOT EXISTS main_table (
project_name VARCHAR
)""")
self.c.execute("""CREATE TABLE IF NOT EXISTS {} (
run_name VARCHAR
)""".format(self.run_table_name))
# Add current project and run to the main table and run_table if not already present
# main_table
self.c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(self.c.fetchall()).squeeze()
if self.project_name not in project_names:
self.c.execute("""INSERT INTO main_table (
project_name) VALUES ('{}'
)""".format(self.project_name))
# run_table
self.c.execute("""SELECT run_name FROM {run_table}""".format(run_table=self.run_table_name))
run_names = np.array(self.c.fetchall()).squeeze()
if self.time_stamp not in run_names:
self.c.execute("""INSERT INTO {} (
run_name) VALUES ('{}'
)""".format(self.run_table_name, self.time_stamp))
# variable_table -> time_stamp_table
self.c.execute("""CREATE TABLE IF NOT EXISTS {} (
variable_name VARCHAR, variable_type VARCHAR
)""".format(self.time_stamp))
self.conn.commit()
def scalar(self, value, step, name):
"""
Plot a scalar value.
:param value: int or float, the value on the y-axis
:param step: int or float, the value on the x-axis
:param name: String, the name of the variable to be used during visualization
"""
# Spaces not allowed for scalar variable name
assert len(name.split(" ")) < 2, "Ensure that you don't have spaces in your variable name, use '_' instead."
name = "scalar_" + name
self.previous.append(name)
if self.previous[-1] not in self.previous[:-1]:
self.c.execute("""INSERT INTO {time_stamp_table} (
variable_name, variable_type
) VALUES ('{variable}', '{type}')"""
.format(time_stamp_table=self.time_stamp, variable=name, type="scalar"))
else:
self.previous.pop()
self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
X_value FLOAT, Y_value FLOAT, time VARCHAR
)""".format(variable_table_name=self.time_stamp + '_' + name))
self.c.execute("""INSERT INTO {variable_table_name} (
X_value, Y_value, time) VALUES ('{x}', '{y}', '{time}'
)""".format(variable_table_name=self.time_stamp + '_' + name,
x=step, y=value, time=datetime.datetime.now()))
self.conn.commit()
# TODO: Test this
def image(self, image, name):
"""
Show image on the Crystal server.
:param image:
:param name:
:return:
"""
assert len(name.split(" ")) < 2, "Ensure that you don't have spaces in your variable name, use '_' instead."
name = "image_" + name
self.previous.append(name)
if self.previous[-1] not in self.previous[:-1]:
self.c.execute("""INSERT INTO {time_stamp_table} (
variable_name, variable_type
) VALUES ('{variable}', '{type}')"""
.format(time_stamp_table=self.time_stamp, variable=name, type="image"))
else:
self.previous.pop()
self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
images BLOB, time VARCHAR
)""".format(variable_table_name=self.time_stamp + '_' + name))
self.c.execute("""INSERT INTO {variable_table_name} (
images, time) VALUES ('{img}', '{time}'
)""".format(variable_table_name=self.time_stamp + '_' + name,
img=sqlite3.Binary(np.array(image).tobytes()), time=datetime.datetime.now()))
self.conn.commit()
def heatmap(self, value, step, name, value_names=None):
"""
:param value_names:
:param step:
:param value:
:param name:
:return:
"""
assert len(name.split(" ")) < 2, "Ensure that you don't have spaces in your variable name, use '_' instead."
name = "heatmap_" + name
self.previous.append(name)
if self.previous[-1] not in self.previous[:-1]:
self.c.execute("""INSERT INTO {time_stamp_table} (
variable_name, variable_type
) VALUES ('{variable}', '{type}')"""
.format(time_stamp_table=self.time_stamp, variable=name, type="heatmap"))
else:
self.previous.pop()
if value_names is None:
self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
X_value FLOAT, Y_value ARRAY, time VARCHAR
)""".format(variable_table_name=self.time_stamp + '_' + name))
self.c.execute("""INSERT INTO {variable_table_name} (
X_value, Y_value, time) VALUES (?, ?, ?
)""".format(variable_table_name=self.time_stamp + '_' + name),
(step, value, datetime.datetime.now()))
else:
self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
X_value FLOAT, Y_value ARRAY, V_names ARRAY, time VARCHAR
)""".format(variable_table_name=self.time_stamp + '_' + name))
self.c.execute("""INSERT INTO {variable_table_name} (
X_value, Y_value, V_names, time) VALUES (?, ?, ?, ?
)""".format(variable_table_name=self.time_stamp + '_' + name),
(step, value, value_names, datetime.datetime.now()))
self.conn.commit()
def fft(self):
pass
|
Naresh1318/crystal | crystal/Crystal.py | Crystal.scalar | python | def scalar(self, value, step, name):
# Spaces not allowed for scalar variable name
assert len(name.split(" ")) < 2, "Ensure that you don't have spaces in your variable name, use '_' instead."
name = "scalar_" + name
self.previous.append(name)
if self.previous[-1] not in self.previous[:-1]:
self.c.execute("""INSERT INTO {time_stamp_table} (
variable_name, variable_type
) VALUES ('{variable}', '{type}')"""
.format(time_stamp_table=self.time_stamp, variable=name, type="scalar"))
else:
self.previous.pop()
self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
X_value FLOAT, Y_value FLOAT, time VARCHAR
)""".format(variable_table_name=self.time_stamp + '_' + name))
self.c.execute("""INSERT INTO {variable_table_name} (
X_value, Y_value, time) VALUES ('{x}', '{y}', '{time}'
)""".format(variable_table_name=self.time_stamp + '_' + name,
x=step, y=value, time=datetime.datetime.now()))
self.conn.commit() | Plot a scalar value.
:param value: int or float, the value on the y-axis
:param step: int or float, the value on the x-axis
:param name: String, the name of the variable to be used during visualization | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/Crystal.py#L101-L128 | null | class Crystal:
"""
Provides methods to store various types of data onto the database.
docs:
* Creates a new project using the script name if no project name has been provided.
* Creates a new run table for every class instantiation.
"""
def __init__(self, project_name=None, database_dir=None):
"""
Create a crystal instance that could be used to write data onto the database.
:param project_name: str, default -> None, uses the script name the instance is being used from as the
project name.
-> str, uses this name instead.
"""
if project_name is None:
self.called_from = os.path.realpath(sys.argv[0])
self.project_name = os.path.basename(self.called_from)[:-3] # Remove .py
self.project_name = self.project_name.split(".")[0]
else:
# Spaces not allowed for project name
assert len(project_name.split(" ")) < 2, \
"Ensure that you don't have spaces in your variable name, use '_' instead."
self.project_name = project_name
self.time_stamp = get_valid_time_stamp()
self.previous = [None]
if database_dir is None:
# Create a new database on the home directory if not present
home_dir = os.path.expanduser("~")
main_data_dir = os.path.join(home_dir, DEFAULT_DATABASE_DIR_NAME)
if not os.path.exists(main_data_dir):
print("Crystal_data directory not found. Creating a new one...")
os.mkdir(main_data_dir)
else:
utils.dd.set_database_dir(new_database_dir=database_dir)
# Create new project and run tables if not already found
self.conn, self.c = utils.open_data_base_connection(skip_dir_check=True)
self.run_table_name = self.project_name + '_' + 'run_table'
self.c.execute("""CREATE TABLE IF NOT EXISTS main_table (
project_name VARCHAR
)""")
self.c.execute("""CREATE TABLE IF NOT EXISTS {} (
run_name VARCHAR
)""".format(self.run_table_name))
# Add current project and run to the main table and run_table if not already present
# main_table
self.c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(self.c.fetchall()).squeeze()
if self.project_name not in project_names:
self.c.execute("""INSERT INTO main_table (
project_name) VALUES ('{}'
)""".format(self.project_name))
# run_table
self.c.execute("""SELECT run_name FROM {run_table}""".format(run_table=self.run_table_name))
run_names = np.array(self.c.fetchall()).squeeze()
if self.time_stamp not in run_names:
self.c.execute("""INSERT INTO {} (
run_name) VALUES ('{}'
)""".format(self.run_table_name, self.time_stamp))
# variable_table -> time_stamp_table
self.c.execute("""CREATE TABLE IF NOT EXISTS {} (
variable_name VARCHAR, variable_type VARCHAR
)""".format(self.time_stamp))
self.conn.commit()
# TODO: Test this
def image(self, image, name):
"""
Show image on the Crystal server.
:param image:
:param name:
:return:
"""
assert len(name.split(" ")) < 2, "Ensure that you don't have spaces in your variable name, use '_' instead."
name = "image_" + name
self.previous.append(name)
if self.previous[-1] not in self.previous[:-1]:
self.c.execute("""INSERT INTO {time_stamp_table} (
variable_name, variable_type
) VALUES ('{variable}', '{type}')"""
.format(time_stamp_table=self.time_stamp, variable=name, type="image"))
else:
self.previous.pop()
self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
images BLOB, time VARCHAR
)""".format(variable_table_name=self.time_stamp + '_' + name))
self.c.execute("""INSERT INTO {variable_table_name} (
images, time) VALUES ('{img}', '{time}'
)""".format(variable_table_name=self.time_stamp + '_' + name,
img=sqlite3.Binary(np.array(image).tobytes()), time=datetime.datetime.now()))
self.conn.commit()
def heatmap(self, value, step, name, value_names=None):
"""
:param value_names:
:param step:
:param value:
:param name:
:return:
"""
assert len(name.split(" ")) < 2, "Ensure that you don't have spaces in your variable name, use '_' instead."
name = "heatmap_" + name
self.previous.append(name)
if self.previous[-1] not in self.previous[:-1]:
self.c.execute("""INSERT INTO {time_stamp_table} (
variable_name, variable_type
) VALUES ('{variable}', '{type}')"""
.format(time_stamp_table=self.time_stamp, variable=name, type="heatmap"))
else:
self.previous.pop()
if value_names is None:
self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
X_value FLOAT, Y_value ARRAY, time VARCHAR
)""".format(variable_table_name=self.time_stamp + '_' + name))
self.c.execute("""INSERT INTO {variable_table_name} (
X_value, Y_value, time) VALUES (?, ?, ?
)""".format(variable_table_name=self.time_stamp + '_' + name),
(step, value, datetime.datetime.now()))
else:
self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
X_value FLOAT, Y_value ARRAY, V_names ARRAY, time VARCHAR
)""".format(variable_table_name=self.time_stamp + '_' + name))
self.c.execute("""INSERT INTO {variable_table_name} (
X_value, Y_value, V_names, time) VALUES (?, ?, ?, ?
)""".format(variable_table_name=self.time_stamp + '_' + name),
(step, value, value_names, datetime.datetime.now()))
self.conn.commit()
def fft(self):
pass
|
Naresh1318/crystal | crystal/Crystal.py | Crystal.image | python | def image(self, image, name):
assert len(name.split(" ")) < 2, "Ensure that you don't have spaces in your variable name, use '_' instead."
name = "image_" + name
self.previous.append(name)
if self.previous[-1] not in self.previous[:-1]:
self.c.execute("""INSERT INTO {time_stamp_table} (
variable_name, variable_type
) VALUES ('{variable}', '{type}')"""
.format(time_stamp_table=self.time_stamp, variable=name, type="image"))
else:
self.previous.pop()
self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
images BLOB, time VARCHAR
)""".format(variable_table_name=self.time_stamp + '_' + name))
self.c.execute("""INSERT INTO {variable_table_name} (
images, time) VALUES ('{img}', '{time}'
)""".format(variable_table_name=self.time_stamp + '_' + name,
img=sqlite3.Binary(np.array(image).tobytes()), time=datetime.datetime.now()))
self.conn.commit() | Show image on the Crystal server.
:param image:
:param name:
:return: | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/Crystal.py#L131-L157 | null | class Crystal:
"""
Provides methods to store various types of data onto the database.
docs:
* Creates a new project using the script name if no project name has been provided.
* Creates a new run table for every class instantiation.
"""
def __init__(self, project_name=None, database_dir=None):
"""
Create a crystal instance that could be used to write data onto the database.
:param project_name: str, default -> None, uses the script name the instance is being used from as the
project name.
-> str, uses this name instead.
"""
if project_name is None:
self.called_from = os.path.realpath(sys.argv[0])
self.project_name = os.path.basename(self.called_from)[:-3] # Remove .py
self.project_name = self.project_name.split(".")[0]
else:
# Spaces not allowed for project name
assert len(project_name.split(" ")) < 2, \
"Ensure that you don't have spaces in your variable name, use '_' instead."
self.project_name = project_name
self.time_stamp = get_valid_time_stamp()
self.previous = [None]
if database_dir is None:
# Create a new database on the home directory if not present
home_dir = os.path.expanduser("~")
main_data_dir = os.path.join(home_dir, DEFAULT_DATABASE_DIR_NAME)
if not os.path.exists(main_data_dir):
print("Crystal_data directory not found. Creating a new one...")
os.mkdir(main_data_dir)
else:
utils.dd.set_database_dir(new_database_dir=database_dir)
# Create new project and run tables if not already found
self.conn, self.c = utils.open_data_base_connection(skip_dir_check=True)
self.run_table_name = self.project_name + '_' + 'run_table'
self.c.execute("""CREATE TABLE IF NOT EXISTS main_table (
project_name VARCHAR
)""")
self.c.execute("""CREATE TABLE IF NOT EXISTS {} (
run_name VARCHAR
)""".format(self.run_table_name))
# Add current project and run to the main table and run_table if not already present
# main_table
self.c.execute("""SELECT project_name FROM main_table""")
project_names = np.array(self.c.fetchall()).squeeze()
if self.project_name not in project_names:
self.c.execute("""INSERT INTO main_table (
project_name) VALUES ('{}'
)""".format(self.project_name))
# run_table
self.c.execute("""SELECT run_name FROM {run_table}""".format(run_table=self.run_table_name))
run_names = np.array(self.c.fetchall()).squeeze()
if self.time_stamp not in run_names:
self.c.execute("""INSERT INTO {} (
run_name) VALUES ('{}'
)""".format(self.run_table_name, self.time_stamp))
# variable_table -> time_stamp_table
self.c.execute("""CREATE TABLE IF NOT EXISTS {} (
variable_name VARCHAR, variable_type VARCHAR
)""".format(self.time_stamp))
self.conn.commit()
def scalar(self, value, step, name):
"""
Plot a scalar value.
:param value: int or float, the value on the y-axis
:param step: int or float, the value on the x-axis
:param name: String, the name of the variable to be used during visualization
"""
# Spaces not allowed for scalar variable name
assert len(name.split(" ")) < 2, "Ensure that you don't have spaces in your variable name, use '_' instead."
name = "scalar_" + name
self.previous.append(name)
if self.previous[-1] not in self.previous[:-1]:
self.c.execute("""INSERT INTO {time_stamp_table} (
variable_name, variable_type
) VALUES ('{variable}', '{type}')"""
.format(time_stamp_table=self.time_stamp, variable=name, type="scalar"))
else:
self.previous.pop()
self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
X_value FLOAT, Y_value FLOAT, time VARCHAR
)""".format(variable_table_name=self.time_stamp + '_' + name))
self.c.execute("""INSERT INTO {variable_table_name} (
X_value, Y_value, time) VALUES ('{x}', '{y}', '{time}'
)""".format(variable_table_name=self.time_stamp + '_' + name,
x=step, y=value, time=datetime.datetime.now()))
self.conn.commit()
# TODO: Test this
def heatmap(self, value, step, name, value_names=None):
"""
:param value_names:
:param step:
:param value:
:param name:
:return:
"""
assert len(name.split(" ")) < 2, "Ensure that you don't have spaces in your variable name, use '_' instead."
name = "heatmap_" + name
self.previous.append(name)
if self.previous[-1] not in self.previous[:-1]:
self.c.execute("""INSERT INTO {time_stamp_table} (
variable_name, variable_type
) VALUES ('{variable}', '{type}')"""
.format(time_stamp_table=self.time_stamp, variable=name, type="heatmap"))
else:
self.previous.pop()
if value_names is None:
self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
X_value FLOAT, Y_value ARRAY, time VARCHAR
)""".format(variable_table_name=self.time_stamp + '_' + name))
self.c.execute("""INSERT INTO {variable_table_name} (
X_value, Y_value, time) VALUES (?, ?, ?
)""".format(variable_table_name=self.time_stamp + '_' + name),
(step, value, datetime.datetime.now()))
else:
self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
X_value FLOAT, Y_value ARRAY, V_names ARRAY, time VARCHAR
)""".format(variable_table_name=self.time_stamp + '_' + name))
self.c.execute("""INSERT INTO {variable_table_name} (
X_value, Y_value, V_names, time) VALUES (?, ?, ?, ?
)""".format(variable_table_name=self.time_stamp + '_' + name),
(step, value, value_names, datetime.datetime.now()))
self.conn.commit()
def fft(self):
pass
|
Naresh1318/crystal | crystal/app.py | index | python | def index():
# Reset current index values when the page is refreshed
for k, v in current_index.items():
current_index[k] = 0
logging.info("Dashboard refreshed")
# render the template (below) that will use JavaScript to read the stream
return render_template("crystal_dashboard.html") | Renders the dashboard when the server is initially run.
Usage description:
The rendered HTML allows the user to select a project and the desired run.
:return: Template to render, Object that is taken care by flask. | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L58-L74 | null | # -*- coding: utf-8 -*-
"""
crystal.app
~~~~~~~~~~~~~~
A multipurpose real-time plotting library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import sqlite3
import logging
import numpy as np
from flask import Flask, render_template, jsonify, request, send_file, json
import crystal.sql_table_utils as utils
# Setup logging
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='%(levelname)s '
'%(asctime)s.%(msecs)06d: '
'%(filename)s: '
'%(lineno)d '
'%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
current_index = {} # Used to keep track of the index for plotting
class CustomFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
variable_start_string='%%', # Default is '{{', I'm changing this because Vue.js uses '{{' / '}}'
variable_end_string='%%',
))
app = CustomFlask(__name__) # This replaces your existing "app = Flask(__name__)"
def set_database_dir(database_dir):
logging.info(database_dir)
if database_dir == "" or database_dir is None:
logging.info("database_dir: {} is not provided. Using the default directory at {}".
format(database_dir, utils.dd.get_database_dir()))
else:
utils.dd.set_database_dir(database_dir)
@app.route('/')
@app.route('/update', methods=['POST'])
def update():
"""
Called by XMLHTTPrequest function periodically to get new graph data.
Usage description:
This function queries the database and returns all the newly added values.
:return: JSON Object, passed on to the JS script.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == 'POST':
# Get figure stats
selected_run = request.form['selected_run']
variable_names = utils.get_variables(selected_run).items()
if len(current_index) < 1:
for _, v_n in variable_names:
current_index[v_n] = 0
logging.info("Current index: {}".format(current_index))
data = utils.get_variable_update_dicts(current_index, variable_names, selected_run)
return jsonify(data)
@app.route('/get_projects', methods=['GET'])
def get_projects():
"""
Send a dictionary of projects that are available on the database.
Usage description:
This function is usually called to get and display the list of projects available in the database.
:return: JSON, {<int_keys>: <project_name>}
"""
assert request.method == "GET", "GET request expected received {}".format(request.method)
try:
if request.method == 'GET':
projects = utils.get_projects()
return jsonify(projects)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_runs', methods=['POST'])
def get_runs():
"""
Send a dictionary of runs associated with the selected project.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_project = request.form["selected_project"]
runs = utils.get_runs(selected_project)
return jsonify(runs)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_variables', methods=['POST'])
def get_variables():
"""
Send a dictionary of variables associated with the selected run.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database for the user to view.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_run = request.form["selected_run"]
variables = utils.get_variables(selected_run)
# Reset current_index when you select a new run
variable_names = variables.items()
global current_index
current_index = {}
if len(current_index) < 1:
for _, v_n in variable_names:
current_index["{}".format(v_n)] = 0
return jsonify(variables)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_graph_csv', methods=['POST'])
def get_graph_csv():
"""
Allows the user to download a graph's data as a CSV file.
:return: show a dialog box that allows the user to download the CSV file.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_variable_table = request.form["selected_variable_table"]
filename = utils.generate_graph_csv(selected_variable_table)
return send_file(filename, as_attachment=True, attachment_filename='{}.csv'.format(selected_variable_table))
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_run', methods=['GET', 'POST'])
def delete_run():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_run(selections["project"], selections["run"])
return jsonify({"response": "deleted {}".format(selections["run"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_project', methods=['GET', 'POST'])
def delete_project():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_project(selections["project"])
return jsonify({"response": "deleted {}".format(selections["project"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
if __name__ == '__main__':
app.run(debug=True)
|
Naresh1318/crystal | crystal/app.py | update | python | def update():
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == 'POST':
# Get figure stats
selected_run = request.form['selected_run']
variable_names = utils.get_variables(selected_run).items()
if len(current_index) < 1:
for _, v_n in variable_names:
current_index[v_n] = 0
logging.info("Current index: {}".format(current_index))
data = utils.get_variable_update_dicts(current_index, variable_names, selected_run)
return jsonify(data) | Called by XMLHTTPrequest function periodically to get new graph data.
Usage description:
This function queries the database and returns all the newly added values.
:return: JSON Object, passed on to the JS script. | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L78-L100 | [
"def get_variables(run_name):\n \"\"\"\n Returns a dict of variables in the selected run table.\n :param run_name: str, required run table name\n :return: dict -> {<int_keys>: <variable_name>}\n \"\"\"\n conn, c = open_data_base_connection()\n try:\n # Get latest project\n c.execute(\"\"\"SELECT variable_name FROM {}\"\"\".format(run_name))\n variable_names = np.array(c.fetchall()).squeeze(axis=1)\n variable_names = convert_list_to_dict(variable_names)\n return variable_names\n except sqlite3.OperationalError:\n logging.info(\"{} not found\".format(run_name))\n finally:\n conn.close()\n",
"def get_variable_update_dicts(current_index, variable_names, selected_run):\n \"\"\"\n Query appropriate tables and return data to dashboard in the required format.\n :param current_index: int, current index during update\n :param variable_names: str, variable name to fetch values from\n :param selected_run: str, run containing the variable\n :return: dict, {<variable_name>: [<values>]}\n \"\"\"\n conn, c = open_data_base_connection()\n data = {}\n for _, v_n in variable_names:\n data[v_n] = {'x': [], 'y': [], 'z': [], 'vn': []}\n\n try:\n # values for each variable\n for _, v_n in variable_names:\n plot_type = v_n.split(\"_\")[0]\n if plot_type == \"scalar\":\n try:\n c.execute(\"\"\"SELECT X_value FROM {} WHERE rowid > {}\"\"\".format(selected_run + \"_\" + v_n,\n current_index[v_n]))\n x_values = np.array(c.fetchall()).squeeze().tolist()\n c.execute(\"\"\"SELECT Y_value FROM {} WHERE rowid > {}\"\"\".format(selected_run + \"_\" + v_n,\n current_index[v_n]))\n y_values = np.array(c.fetchall()).squeeze().tolist()\n data[v_n][\"x\"] = x_values\n data[v_n][\"y\"] = y_values\n n_values = len(x_values)\n current_index[\"{}\".format(v_n)] += n_values\n logging.info(\"New value found and updated\")\n except IndexError:\n logging.info(\"No new data point found\")\n elif plot_type == \"heatmap\":\n try:\n c.execute(\"\"\"SELECT X_value FROM {} WHERE rowid > {}\"\"\".format(selected_run + \"_\" + v_n,\n current_index[v_n]))\n x_values = np.array(c.fetchall()).squeeze().tolist()\n c.execute(\"\"\"SELECT Y_value FROM {} WHERE rowid > {}\"\"\".format(selected_run + \"_\" + v_n,\n current_index[v_n]))\n y_values = np.array(c.fetchall()).squeeze().tolist()\n c.execute(\"\"\"SELECT V_names FROM {} WHERE rowid > {}\"\"\".format(selected_run + \"_\" + v_n,\n current_index[v_n]))\n v_values = np.array(c.fetchall()).squeeze().tolist()\n data[v_n][\"x\"] = x_values\n data[v_n][\"z\"] = y_values\n data[v_n][\"vn\"] = v_values\n n_values = len(x_values)\n 
current_index[\"{}\".format(v_n)] += n_values\n logging.info(\"New value found and updated\")\n except sqlite3.OperationalError:\n c.execute(\"\"\"SELECT X_value FROM {} WHERE rowid > {}\"\"\".format(selected_run + \"_\" + v_n,\n current_index[v_n]))\n x_values = np.array(c.fetchall()).squeeze().tolist()\n c.execute(\"\"\"SELECT Y_value FROM {} WHERE rowid > {}\"\"\".format(selected_run + \"_\" + v_n,\n current_index[v_n]))\n y_values = np.array(c.fetchall()).squeeze().tolist()\n data[v_n][\"x\"] = x_values\n data[v_n][\"z\"] = y_values\n n_values = len(x_values)\n current_index[\"{}\".format(v_n)] += n_values\n logging.info(\"New value found and updated\")\n except IndexError:\n logging.info(\"No new data point found\")\n except KeyError:\n logging.error(\"I think the run variable has changes. So, I'm passing no data.\")\n\n conn.close()\n return data\n"
] | # -*- coding: utf-8 -*-
"""
crystal.app
~~~~~~~~~~~~~~
A multipurpose real-time plotting library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import sqlite3
import logging
import numpy as np
from flask import Flask, render_template, jsonify, request, send_file, json
import crystal.sql_table_utils as utils
# Setup logging
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='%(levelname)s '
'%(asctime)s.%(msecs)06d: '
'%(filename)s: '
'%(lineno)d '
'%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
current_index = {} # Used to keep track of the index for plotting
class CustomFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
variable_start_string='%%', # Default is '{{', I'm changing this because Vue.js uses '{{' / '}}'
variable_end_string='%%',
))
app = CustomFlask(__name__) # This replaces your existing "app = Flask(__name__)"
def set_database_dir(database_dir):
logging.info(database_dir)
if database_dir == "" or database_dir is None:
logging.info("database_dir: {} is not provided. Using the default directory at {}".
format(database_dir, utils.dd.get_database_dir()))
else:
utils.dd.set_database_dir(database_dir)
@app.route('/')
def index():
"""
Renders the dashboard when the server is initially run.
Usage description:
The rendered HTML allows the user to select a project and the desired run.
:return: Template to render, Object that is taken care by flask.
"""
# Reset current index values when the page is refreshed
for k, v in current_index.items():
current_index[k] = 0
logging.info("Dashboard refreshed")
# render the template (below) that will use JavaScript to read the stream
return render_template("crystal_dashboard.html")
@app.route('/update', methods=['POST'])
@app.route('/get_projects', methods=['GET'])
def get_projects():
"""
Send a dictionary of projects that are available on the database.
Usage description:
This function is usually called to get and display the list of projects available in the database.
:return: JSON, {<int_keys>: <project_name>}
"""
assert request.method == "GET", "GET request expected received {}".format(request.method)
try:
if request.method == 'GET':
projects = utils.get_projects()
return jsonify(projects)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_runs', methods=['POST'])
def get_runs():
"""
Send a dictionary of runs associated with the selected project.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_project = request.form["selected_project"]
runs = utils.get_runs(selected_project)
return jsonify(runs)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_variables', methods=['POST'])
def get_variables():
"""
Send a dictionary of variables associated with the selected run.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database for the user to view.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_run = request.form["selected_run"]
variables = utils.get_variables(selected_run)
# Reset current_index when you select a new run
variable_names = variables.items()
global current_index
current_index = {}
if len(current_index) < 1:
for _, v_n in variable_names:
current_index["{}".format(v_n)] = 0
return jsonify(variables)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_graph_csv', methods=['POST'])
def get_graph_csv():
"""
Allows the user to download a graph's data as a CSV file.
:return: show a dialog box that allows the user to download the CSV file.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_variable_table = request.form["selected_variable_table"]
filename = utils.generate_graph_csv(selected_variable_table)
return send_file(filename, as_attachment=True, attachment_filename='{}.csv'.format(selected_variable_table))
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_run', methods=['GET', 'POST'])
def delete_run():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_run(selections["project"], selections["run"])
return jsonify({"response": "deleted {}".format(selections["run"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_project', methods=['GET', 'POST'])
def delete_project():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_project(selections["project"])
return jsonify({"response": "deleted {}".format(selections["project"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
if __name__ == '__main__':
app.run(debug=True)
|
Naresh1318/crystal | crystal/app.py | get_projects | python | def get_projects():
assert request.method == "GET", "GET request expected received {}".format(request.method)
try:
if request.method == 'GET':
projects = utils.get_projects()
return jsonify(projects)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"}) | Send a dictionary of projects that are available on the database.
Usage description:
This function is usually called to get and display the list of projects available in the database.
:return: JSON, {<int_keys>: <project_name>} | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L104-L121 | [
"def get_projects():\n \"\"\"\n Returns a dict of projects present in the database.\n :return: dict -> {<int_keys>: <project_name>}\n \"\"\"\n conn, c = open_data_base_connection()\n try:\n c.execute(\"\"\"SELECT project_name FROM main_table\"\"\")\n project_names = np.array(c.fetchall()).squeeze(axis=1)\n project_names = convert_list_to_dict(project_names)\n return project_names\n except sqlite3.OperationalError:\n logging.info(\"{} not found\".format(run_name))\n finally:\n conn.close()\n"
] | # -*- coding: utf-8 -*-
"""
crystal.app
~~~~~~~~~~~~~~
A multipurpose real-time plotting library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import sqlite3
import logging
import numpy as np
from flask import Flask, render_template, jsonify, request, send_file, json
import crystal.sql_table_utils as utils
# Setup logging
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='%(levelname)s '
'%(asctime)s.%(msecs)06d: '
'%(filename)s: '
'%(lineno)d '
'%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
current_index = {} # Used to keep track of the index for plotting
class CustomFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
variable_start_string='%%', # Default is '{{', I'm changing this because Vue.js uses '{{' / '}}'
variable_end_string='%%',
))
app = CustomFlask(__name__) # This replaces your existing "app = Flask(__name__)"
def set_database_dir(database_dir):
logging.info(database_dir)
if database_dir == "" or database_dir is None:
logging.info("database_dir: {} is not provided. Using the default directory at {}".
format(database_dir, utils.dd.get_database_dir()))
else:
utils.dd.set_database_dir(database_dir)
@app.route('/')
def index():
"""
Renders the dashboard when the server is initially run.
Usage description:
The rendered HTML allows the user to select a project and the desired run.
:return: Template to render, Object that is taken care by flask.
"""
# Reset current index values when the page is refreshed
for k, v in current_index.items():
current_index[k] = 0
logging.info("Dashboard refreshed")
# render the template (below) that will use JavaScript to read the stream
return render_template("crystal_dashboard.html")
@app.route('/update', methods=['POST'])
def update():
"""
Called by XMLHTTPrequest function periodically to get new graph data.
Usage description:
This function queries the database and returns all the newly added values.
:return: JSON Object, passed on to the JS script.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == 'POST':
# Get figure stats
selected_run = request.form['selected_run']
variable_names = utils.get_variables(selected_run).items()
if len(current_index) < 1:
for _, v_n in variable_names:
current_index[v_n] = 0
logging.info("Current index: {}".format(current_index))
data = utils.get_variable_update_dicts(current_index, variable_names, selected_run)
return jsonify(data)
@app.route('/get_projects', methods=['GET'])
@app.route('/get_runs', methods=['POST'])
def get_runs():
"""
Send a dictionary of runs associated with the selected project.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_project = request.form["selected_project"]
runs = utils.get_runs(selected_project)
return jsonify(runs)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_variables', methods=['POST'])
def get_variables():
"""
Send a dictionary of variables associated with the selected run.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database for the user to view.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_run = request.form["selected_run"]
variables = utils.get_variables(selected_run)
# Reset current_index when you select a new run
variable_names = variables.items()
global current_index
current_index = {}
if len(current_index) < 1:
for _, v_n in variable_names:
current_index["{}".format(v_n)] = 0
return jsonify(variables)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_graph_csv', methods=['POST'])
def get_graph_csv():
"""
Allows the user to download a graph's data as a CSV file.
:return: show a dialog box that allows the user to download the CSV file.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_variable_table = request.form["selected_variable_table"]
filename = utils.generate_graph_csv(selected_variable_table)
return send_file(filename, as_attachment=True, attachment_filename='{}.csv'.format(selected_variable_table))
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_run', methods=['GET', 'POST'])
def delete_run():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_run(selections["project"], selections["run"])
return jsonify({"response": "deleted {}".format(selections["run"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_project', methods=['GET', 'POST'])
def delete_project():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_project(selections["project"])
return jsonify({"response": "deleted {}".format(selections["project"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
if __name__ == '__main__':
app.run(debug=True)
|
Naresh1318/crystal | crystal/app.py | get_runs | python | def get_runs():
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_project = request.form["selected_project"]
runs = utils.get_runs(selected_project)
return jsonify(runs)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"}) | Send a dictionary of runs associated with the selected project.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database.
:return: JSON, {<int_keys>: <run_name>} | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L125-L144 | [
"def get_runs(project_name):\n \"\"\"\n Returns a dict of runs present in project.\n :return: dict -> {<int_keys>: <project_name>}\n \"\"\"\n conn, c = open_data_base_connection()\n try:\n c.execute(\"\"\"SELECT run_name FROM {}\"\"\".format(project_name + \"_run_table\"))\n run_names = np.array(c.fetchall()).squeeze(axis=1)\n run_names = convert_list_to_dict(run_names)\n return run_names\n except sqlite3.OperationalError:\n logging.info(\"{} not found\".format(run_name))\n finally:\n conn.close()\n"
] | # -*- coding: utf-8 -*-
"""
crystal.app
~~~~~~~~~~~~~~
A multipurpose real-time plotting library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import sqlite3
import logging
import numpy as np
from flask import Flask, render_template, jsonify, request, send_file, json
import crystal.sql_table_utils as utils
# Setup logging
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='%(levelname)s '
'%(asctime)s.%(msecs)06d: '
'%(filename)s: '
'%(lineno)d '
'%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
current_index = {} # Used to keep track of the index for plotting
class CustomFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
variable_start_string='%%', # Default is '{{', I'm changing this because Vue.js uses '{{' / '}}'
variable_end_string='%%',
))
app = CustomFlask(__name__) # This replaces your existing "app = Flask(__name__)"
def set_database_dir(database_dir):
logging.info(database_dir)
if database_dir == "" or database_dir is None:
logging.info("database_dir: {} is not provided. Using the default directory at {}".
format(database_dir, utils.dd.get_database_dir()))
else:
utils.dd.set_database_dir(database_dir)
@app.route('/')
def index():
"""
Renders the dashboard when the server is initially run.
Usage description:
The rendered HTML allows the user to select a project and the desired run.
:return: Template to render, Object that is taken care by flask.
"""
# Reset current index values when the page is refreshed
for k, v in current_index.items():
current_index[k] = 0
logging.info("Dashboard refreshed")
# render the template (below) that will use JavaScript to read the stream
return render_template("crystal_dashboard.html")
@app.route('/update', methods=['POST'])
def update():
"""
Called by XMLHTTPrequest function periodically to get new graph data.
Usage description:
This function queries the database and returns all the newly added values.
:return: JSON Object, passed on to the JS script.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == 'POST':
# Get figure stats
selected_run = request.form['selected_run']
variable_names = utils.get_variables(selected_run).items()
if len(current_index) < 1:
for _, v_n in variable_names:
current_index[v_n] = 0
logging.info("Current index: {}".format(current_index))
data = utils.get_variable_update_dicts(current_index, variable_names, selected_run)
return jsonify(data)
@app.route('/get_projects', methods=['GET'])
def get_projects():
"""
Send a dictionary of projects that are available on the database.
Usage description:
This function is usually called to get and display the list of projects available in the database.
:return: JSON, {<int_keys>: <project_name>}
"""
assert request.method == "GET", "GET request expected received {}".format(request.method)
try:
if request.method == 'GET':
projects = utils.get_projects()
return jsonify(projects)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_runs', methods=['POST'])
@app.route('/get_variables', methods=['POST'])
def get_variables():
"""
Send a dictionary of variables associated with the selected run.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database for the user to view.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_run = request.form["selected_run"]
variables = utils.get_variables(selected_run)
# Reset current_index when you select a new run
variable_names = variables.items()
global current_index
current_index = {}
if len(current_index) < 1:
for _, v_n in variable_names:
current_index["{}".format(v_n)] = 0
return jsonify(variables)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_graph_csv', methods=['POST'])
def get_graph_csv():
"""
Allows the user to download a graph's data as a CSV file.
:return: show a dialog box that allows the user to download the CSV file.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_variable_table = request.form["selected_variable_table"]
filename = utils.generate_graph_csv(selected_variable_table)
return send_file(filename, as_attachment=True, attachment_filename='{}.csv'.format(selected_variable_table))
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_run', methods=['GET', 'POST'])
def delete_run():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_run(selections["project"], selections["run"])
return jsonify({"response": "deleted {}".format(selections["run"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_project', methods=['GET', 'POST'])
def delete_project():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_project(selections["project"])
return jsonify({"response": "deleted {}".format(selections["project"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
if __name__ == '__main__':
app.run(debug=True)
|
Naresh1318/crystal | crystal/app.py | get_variables | python | def get_variables():
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_run = request.form["selected_run"]
variables = utils.get_variables(selected_run)
# Reset current_index when you select a new run
variable_names = variables.items()
global current_index
current_index = {}
if len(current_index) < 1:
for _, v_n in variable_names:
current_index["{}".format(v_n)] = 0
return jsonify(variables)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"}) | Send a dictionary of variables associated with the selected run.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database for the user to view.
:return: JSON, {<int_keys>: <run_name>} | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L148-L175 | [
"def get_variables(run_name):\n \"\"\"\n Returns a dict of variables in the selected run table.\n :param run_name: str, required run table name\n :return: dict -> {<int_keys>: <variable_name>}\n \"\"\"\n conn, c = open_data_base_connection()\n try:\n # Get latest project\n c.execute(\"\"\"SELECT variable_name FROM {}\"\"\".format(run_name))\n variable_names = np.array(c.fetchall()).squeeze(axis=1)\n variable_names = convert_list_to_dict(variable_names)\n return variable_names\n except sqlite3.OperationalError:\n logging.info(\"{} not found\".format(run_name))\n finally:\n conn.close()\n"
] | # -*- coding: utf-8 -*-
"""
crystal.app
~~~~~~~~~~~~~~
A multipurpose real-time plotting library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import sqlite3
import logging
import numpy as np
from flask import Flask, render_template, jsonify, request, send_file, json
import crystal.sql_table_utils as utils
# Setup logging
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='%(levelname)s '
'%(asctime)s.%(msecs)06d: '
'%(filename)s: '
'%(lineno)d '
'%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
current_index = {} # Used to keep track of the index for plotting
class CustomFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
variable_start_string='%%', # Default is '{{', I'm changing this because Vue.js uses '{{' / '}}'
variable_end_string='%%',
))
app = CustomFlask(__name__) # This replaces your existing "app = Flask(__name__)"
def set_database_dir(database_dir):
logging.info(database_dir)
if database_dir == "" or database_dir is None:
logging.info("database_dir: {} is not provided. Using the default directory at {}".
format(database_dir, utils.dd.get_database_dir()))
else:
utils.dd.set_database_dir(database_dir)
@app.route('/')
def index():
"""
Renders the dashboard when the server is initially run.
Usage description:
The rendered HTML allows the user to select a project and the desired run.
:return: Template to render, Object that is taken care by flask.
"""
# Reset current index values when the page is refreshed
for k, v in current_index.items():
current_index[k] = 0
logging.info("Dashboard refreshed")
# render the template (below) that will use JavaScript to read the stream
return render_template("crystal_dashboard.html")
@app.route('/update', methods=['POST'])
def update():
"""
Called by XMLHTTPrequest function periodically to get new graph data.
Usage description:
This function queries the database and returns all the newly added values.
:return: JSON Object, passed on to the JS script.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == 'POST':
# Get figure stats
selected_run = request.form['selected_run']
variable_names = utils.get_variables(selected_run).items()
if len(current_index) < 1:
for _, v_n in variable_names:
current_index[v_n] = 0
logging.info("Current index: {}".format(current_index))
data = utils.get_variable_update_dicts(current_index, variable_names, selected_run)
return jsonify(data)
@app.route('/get_projects', methods=['GET'])
def get_projects():
"""
Send a dictionary of projects that are available on the database.
Usage description:
This function is usually called to get and display the list of projects available in the database.
:return: JSON, {<int_keys>: <project_name>}
"""
assert request.method == "GET", "GET request expected received {}".format(request.method)
try:
if request.method == 'GET':
projects = utils.get_projects()
return jsonify(projects)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_runs', methods=['POST'])
def get_runs():
"""
Send a dictionary of runs associated with the selected project.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_project = request.form["selected_project"]
runs = utils.get_runs(selected_project)
return jsonify(runs)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_variables', methods=['POST'])
@app.route('/get_graph_csv', methods=['POST'])
def get_graph_csv():
"""
Allows the user to download a graph's data as a CSV file.
:return: show a dialog box that allows the user to download the CSV file.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_variable_table = request.form["selected_variable_table"]
filename = utils.generate_graph_csv(selected_variable_table)
return send_file(filename, as_attachment=True, attachment_filename='{}.csv'.format(selected_variable_table))
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_run', methods=['GET', 'POST'])
def delete_run():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_run(selections["project"], selections["run"])
return jsonify({"response": "deleted {}".format(selections["run"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_project', methods=['GET', 'POST'])
def delete_project():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_project(selections["project"])
return jsonify({"response": "deleted {}".format(selections["project"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
if __name__ == '__main__':
app.run(debug=True)
|
Naresh1318/crystal | crystal/app.py | get_graph_csv | python | def get_graph_csv():
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_variable_table = request.form["selected_variable_table"]
filename = utils.generate_graph_csv(selected_variable_table)
return send_file(filename, as_attachment=True, attachment_filename='{}.csv'.format(selected_variable_table))
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"}) | Allows the user to download a graph's data as a CSV file.
:return: show a dialog box that allows the user to download the CSV file. | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L179-L192 | [
"def generate_graph_csv(variable_table_name):\n \"\"\"\n Generates a temporary CSV file that contains the data for the selected variable table name.\n :param variable_table_name: str, variable table name\n :return: str, temp CSV file path\n \"\"\"\n temp_csv = home_dir + \"/PycharmProjects/crystal/crystal/static/temp.csv\"\n conn, c = open_data_base_connection()\n\n # Get variable data\n c.execute(\"\"\"SELECT * FROM {}\"\"\".format(variable_table_name))\n with open(temp_csv, \"w\", newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow([i[0] for i in c.description]) # write headers\n csv_writer.writerows(c)\n print(\"File saved: {}\".format(temp_csv))\n conn.close()\n\n return temp_csv\n"
] | # -*- coding: utf-8 -*-
"""
crystal.app
~~~~~~~~~~~~~~
A multipurpose real-time plotting library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import sqlite3
import logging
import numpy as np
from flask import Flask, render_template, jsonify, request, send_file, json
import crystal.sql_table_utils as utils
# Setup logging
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='%(levelname)s '
'%(asctime)s.%(msecs)06d: '
'%(filename)s: '
'%(lineno)d '
'%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
current_index = {} # Used to keep track of the index for plotting
class CustomFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
variable_start_string='%%', # Default is '{{', I'm changing this because Vue.js uses '{{' / '}}'
variable_end_string='%%',
))
app = CustomFlask(__name__) # This replaces your existing "app = Flask(__name__)"
def set_database_dir(database_dir):
logging.info(database_dir)
if database_dir == "" or database_dir is None:
logging.info("database_dir: {} is not provided. Using the default directory at {}".
format(database_dir, utils.dd.get_database_dir()))
else:
utils.dd.set_database_dir(database_dir)
@app.route('/')
def index():
"""
Renders the dashboard when the server is initially run.
Usage description:
The rendered HTML allows the user to select a project and the desired run.
:return: Template to render, Object that is taken care by flask.
"""
# Reset current index values when the page is refreshed
for k, v in current_index.items():
current_index[k] = 0
logging.info("Dashboard refreshed")
# render the template (below) that will use JavaScript to read the stream
return render_template("crystal_dashboard.html")
@app.route('/update', methods=['POST'])
def update():
"""
Called by XMLHTTPrequest function periodically to get new graph data.
Usage description:
This function queries the database and returns all the newly added values.
:return: JSON Object, passed on to the JS script.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == 'POST':
# Get figure stats
selected_run = request.form['selected_run']
variable_names = utils.get_variables(selected_run).items()
if len(current_index) < 1:
for _, v_n in variable_names:
current_index[v_n] = 0
logging.info("Current index: {}".format(current_index))
data = utils.get_variable_update_dicts(current_index, variable_names, selected_run)
return jsonify(data)
@app.route('/get_projects', methods=['GET'])
def get_projects():
"""
Send a dictionary of projects that are available on the database.
Usage description:
This function is usually called to get and display the list of projects available in the database.
:return: JSON, {<int_keys>: <project_name>}
"""
assert request.method == "GET", "GET request expected received {}".format(request.method)
try:
if request.method == 'GET':
projects = utils.get_projects()
return jsonify(projects)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_runs', methods=['POST'])
def get_runs():
"""
Send a dictionary of runs associated with the selected project.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_project = request.form["selected_project"]
runs = utils.get_runs(selected_project)
return jsonify(runs)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_variables', methods=['POST'])
def get_variables():
"""
Send a dictionary of variables associated with the selected run.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database for the user to view.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_run = request.form["selected_run"]
variables = utils.get_variables(selected_run)
# Reset current_index when you select a new run
variable_names = variables.items()
global current_index
current_index = {}
if len(current_index) < 1:
for _, v_n in variable_names:
current_index["{}".format(v_n)] = 0
return jsonify(variables)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_graph_csv', methods=['POST'])
@app.route('/delete_run', methods=['GET', 'POST'])
def delete_run():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_run(selections["project"], selections["run"])
return jsonify({"response": "deleted {}".format(selections["run"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_project', methods=['GET', 'POST'])
def delete_project():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_project(selections["project"])
return jsonify({"response": "deleted {}".format(selections["project"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
if __name__ == '__main__':
app.run(debug=True)
|
Naresh1318/crystal | crystal/app.py | delete_run | python | def delete_run():
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_run(selections["project"], selections["run"])
return jsonify({"response": "deleted {}".format(selections["run"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"}) | Delete the selected run from the database.
:return: | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L196-L209 | [
"def drop_run(project_name, run_name):\n \"\"\"\n Deletes a run from a desired project. If this causes the run_table to be empty then the entire project gets deleted\n :param project_name: String, project which contains the desire run_name\n :param run_name: String, run to delete\n \"\"\"\n conn, c = open_data_base_connection()\n # delete all the variable tables first\n c.execute(\"SELECT variable_name FROM {}\".format(run_name))\n try:\n all_variables = np.array(c.fetchall()).squeeze(axis=1)\n for i in all_variables:\n variable_table_name = run_name + '_' + i\n c.execute(\"\"\"DROP TABLE IF EXISTS {}\"\"\".format(variable_table_name))\n except np.core._internal.AxisError:\n print(\"Did not find any values, so deleting run table directly.\")\n\n c.execute(\"\"\"DROP TABLE IF EXISTS {}\"\"\".format(run_name))\n c.execute(\"\"\"DELETE FROM {} WHERE run_name='{}'\"\"\".format(project_name + '_run_table', run_name))\n\n # delete project if project_name+'_run_table' is empty\n c.execute(\"\"\"SELECT run_name FROM {}\"\"\".format(project_name + '_run_table'))\n all_runs = c.fetchall()\n if len(all_runs) == 0:\n c.execute(\"\"\"DROP TABLE IF EXISTS {}\"\"\".format(project_name + '_run_table'))\n c.execute(\"\"\"DELETE FROM main_table WHERE project_name='{}'\"\"\".format(project_name))\n\n conn.commit()\n print(\"{} table deleted\".format(run_name))\n"
] | # -*- coding: utf-8 -*-
"""
crystal.app
~~~~~~~~~~~~~~
A multipurpose real-time plotting library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import sqlite3
import logging
import numpy as np
from flask import Flask, render_template, jsonify, request, send_file, json
import crystal.sql_table_utils as utils
# Setup logging
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='%(levelname)s '
'%(asctime)s.%(msecs)06d: '
'%(filename)s: '
'%(lineno)d '
'%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
current_index = {} # Used to keep track of the index for plotting
class CustomFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
variable_start_string='%%', # Default is '{{', I'm changing this because Vue.js uses '{{' / '}}'
variable_end_string='%%',
))
app = CustomFlask(__name__) # This replaces your existing "app = Flask(__name__)"
def set_database_dir(database_dir):
logging.info(database_dir)
if database_dir == "" or database_dir is None:
logging.info("database_dir: {} is not provided. Using the default directory at {}".
format(database_dir, utils.dd.get_database_dir()))
else:
utils.dd.set_database_dir(database_dir)
@app.route('/')
def index():
"""
Renders the dashboard when the server is initially run.
Usage description:
The rendered HTML allows the user to select a project and the desired run.
:return: Template to render, Object that is taken care by flask.
"""
# Reset current index values when the page is refreshed
for k, v in current_index.items():
current_index[k] = 0
logging.info("Dashboard refreshed")
# render the template (below) that will use JavaScript to read the stream
return render_template("crystal_dashboard.html")
@app.route('/update', methods=['POST'])
def update():
"""
Called by XMLHTTPrequest function periodically to get new graph data.
Usage description:
This function queries the database and returns all the newly added values.
:return: JSON Object, passed on to the JS script.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == 'POST':
# Get figure stats
selected_run = request.form['selected_run']
variable_names = utils.get_variables(selected_run).items()
if len(current_index) < 1:
for _, v_n in variable_names:
current_index[v_n] = 0
logging.info("Current index: {}".format(current_index))
data = utils.get_variable_update_dicts(current_index, variable_names, selected_run)
return jsonify(data)
@app.route('/get_projects', methods=['GET'])
def get_projects():
"""
Send a dictionary of projects that are available on the database.
Usage description:
This function is usually called to get and display the list of projects available in the database.
:return: JSON, {<int_keys>: <project_name>}
"""
assert request.method == "GET", "GET request expected received {}".format(request.method)
try:
if request.method == 'GET':
projects = utils.get_projects()
return jsonify(projects)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_runs', methods=['POST'])
def get_runs():
"""
Send a dictionary of runs associated with the selected project.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_project = request.form["selected_project"]
runs = utils.get_runs(selected_project)
return jsonify(runs)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_variables', methods=['POST'])
def get_variables():
"""
Send a dictionary of variables associated with the selected run.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database for the user to view.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_run = request.form["selected_run"]
variables = utils.get_variables(selected_run)
# Reset current_index when you select a new run
variable_names = variables.items()
global current_index
current_index = {}
if len(current_index) < 1:
for _, v_n in variable_names:
current_index["{}".format(v_n)] = 0
return jsonify(variables)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_graph_csv', methods=['POST'])
def get_graph_csv():
"""
Allows the user to download a graph's data as a CSV file.
:return: show a dialog box that allows the user to download the CSV file.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_variable_table = request.form["selected_variable_table"]
filename = utils.generate_graph_csv(selected_variable_table)
return send_file(filename, as_attachment=True, attachment_filename='{}.csv'.format(selected_variable_table))
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_run', methods=['GET', 'POST'])
@app.route('/delete_project', methods=['GET', 'POST'])
def delete_project():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_project(selections["project"])
return jsonify({"response": "deleted {}".format(selections["project"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
if __name__ == '__main__':
app.run(debug=True)
|
Naresh1318/crystal | crystal/app.py | delete_project | python | def delete_project():
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_project(selections["project"])
return jsonify({"response": "deleted {}".format(selections["project"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"}) | Delete the selected run from the database.
:return: | train | https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L213-L226 | [
"def drop_project(project_name):\n \"\"\"\n Deletes all the tables associated with a project and removes it from the main_table\n :param project_name: String, project to delete\n \"\"\"\n conn, c = open_data_base_connection()\n # Need to delete all the run_tables before removing the project_table and the entry from the main_table\n run_table_name = project_name + '_run_table'\n\n c.execute(\"SELECT run_name FROM {}\".format(run_table_name))\n run_names = np.array(c.fetchall()).squeeze(axis=1)\n\n # remove one run at a time\n for run in run_names:\n drop_run(project_name, run)\n\n c.execute(\"DROP TABLE IF EXISTS {}\".format(run_table_name))\n\n # Remove the project row from main table\n c.execute(\"\"\"DELETE FROM main_table WHERE project_name='{}'\"\"\".format(project_name))\n conn.commit()\n print(\"{} project deleted\".format(project_name))\n"
] | # -*- coding: utf-8 -*-
"""
crystal.app
~~~~~~~~~~~~~~
A multipurpose real-time plotting library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import sqlite3
import logging
import numpy as np
from flask import Flask, render_template, jsonify, request, send_file, json
import crystal.sql_table_utils as utils
# Setup logging
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='%(levelname)s '
'%(asctime)s.%(msecs)06d: '
'%(filename)s: '
'%(lineno)d '
'%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
current_index = {} # Used to keep track of the index for plotting
class CustomFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
variable_start_string='%%', # Default is '{{', I'm changing this because Vue.js uses '{{' / '}}'
variable_end_string='%%',
))
app = CustomFlask(__name__) # This replaces your existing "app = Flask(__name__)"
def set_database_dir(database_dir):
logging.info(database_dir)
if database_dir == "" or database_dir is None:
logging.info("database_dir: {} is not provided. Using the default directory at {}".
format(database_dir, utils.dd.get_database_dir()))
else:
utils.dd.set_database_dir(database_dir)
@app.route('/')
def index():
"""
Renders the dashboard when the server is initially run.
Usage description:
The rendered HTML allows the user to select a project and the desired run.
:return: Template to render, Object that is taken care by flask.
"""
# Reset current index values when the page is refreshed
for k, v in current_index.items():
current_index[k] = 0
logging.info("Dashboard refreshed")
# render the template (below) that will use JavaScript to read the stream
return render_template("crystal_dashboard.html")
@app.route('/update', methods=['POST'])
def update():
"""
Called by XMLHTTPrequest function periodically to get new graph data.
Usage description:
This function queries the database and returns all the newly added values.
:return: JSON Object, passed on to the JS script.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == 'POST':
# Get figure stats
selected_run = request.form['selected_run']
variable_names = utils.get_variables(selected_run).items()
if len(current_index) < 1:
for _, v_n in variable_names:
current_index[v_n] = 0
logging.info("Current index: {}".format(current_index))
data = utils.get_variable_update_dicts(current_index, variable_names, selected_run)
return jsonify(data)
@app.route('/get_projects', methods=['GET'])
def get_projects():
"""
Send a dictionary of projects that are available on the database.
Usage description:
This function is usually called to get and display the list of projects available in the database.
:return: JSON, {<int_keys>: <project_name>}
"""
assert request.method == "GET", "GET request expected received {}".format(request.method)
try:
if request.method == 'GET':
projects = utils.get_projects()
return jsonify(projects)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_runs', methods=['POST'])
def get_runs():
"""
Send a dictionary of runs associated with the selected project.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_project = request.form["selected_project"]
runs = utils.get_runs(selected_project)
return jsonify(runs)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_variables', methods=['POST'])
def get_variables():
"""
Send a dictionary of variables associated with the selected run.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database for the user to view.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_run = request.form["selected_run"]
variables = utils.get_variables(selected_run)
# Reset current_index when you select a new run
variable_names = variables.items()
global current_index
current_index = {}
if len(current_index) < 1:
for _, v_n in variable_names:
current_index["{}".format(v_n)] = 0
return jsonify(variables)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/get_graph_csv', methods=['POST'])
def get_graph_csv():
"""
Allows the user to download a graph's data as a CSV file.
:return: show a dialog box that allows the user to download the CSV file.
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_variable_table = request.form["selected_variable_table"]
filename = utils.generate_graph_csv(selected_variable_table)
return send_file(filename, as_attachment=True, attachment_filename='{}.csv'.format(selected_variable_table))
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_run', methods=['GET', 'POST'])
def delete_run():
"""
Delete the selected run from the database.
:return:
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selections = json.loads(request.form["selections"])
utils.drop_run(selections["project"], selections["run"])
return jsonify({"response": "deleted {}".format(selections["run"])})
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"})
@app.route('/delete_project', methods=['GET', 'POST'])
if __name__ == '__main__':
app.run(debug=True)
|
6809/dragonlib | dragonlib/utils/byte_word_values.py | bin2hexline | python | def bin2hexline(data, add_addr=True, width=16):
data = bytearray(data)
# same as string.printable but without \t\n\r\v\f ;)
printable = string.digits + string.ascii_letters + string.punctuation + " "
addr = 0
lines = []
run = True
line_width = 4 + (width * 3) + 1
while run:
if add_addr:
line = ["%04i" % addr]
else:
line = []
ascii_block = ""
for i in range(width):
b = data[addr]
if chr(b) in printable:
ascii_block += chr(b)
else:
ascii_block += "."
line.append("%02x" % b)
addr += 1
if addr >= len(data):
run = False
break
line = " ".join(line)
line = line.ljust(line_width)
line += ascii_block
lines.append(line)
return lines | Format binary data to a Hex-Editor like format...
>>> data = bytearray([i for i in range(256)])
>>> print('\\n'.join(bin2hexline(data, width=16)))
0000 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f ................
0016 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f ................
0032 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f !"#$%&'()*+,-./
0048 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 0123456789:;<=>?
0064 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
0080 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f PQRSTUVWXYZ[\]^_
0096 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f `abcdefghijklmno
0112 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f pqrstuvwxyz{|}~.
0128 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f ................
0144 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f ................
0160 a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af ................
0176 b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf ................
0192 c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf ................
0208 d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df ................
0224 e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef ................
0240 f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ff ................
with open("C:\Python27\python.exe", "rb") as f:
data = f.read(150)
print("\n".join(bin2hexline(data, width=16)))
0000 4d 5a 90 00 03 00 00 00 04 00 00 00 ff ff 00 00 MZ..............
0016 b8 00 00 00 00 00 00 00 40 00 00 00 00 00 00 00 ........@.......
0032 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
0048 00 00 00 00 00 00 00 00 00 00 00 00 e8 00 00 00 ................
0064 0e 1f ba 0e 00 b4 09 cd 21 b8 01 4c cd 21 54 68 ........!..L.!Th
0080 69 73 20 70 72 6f 67 72 61 6d 20 63 61 6e 6e 6f is.program.canno
0096 74 20 62 65 20 72 75 6e 20 69 6e 20 44 4f 53 20 t.be.run.in.DOS.
0112 6d 6f 64 65 2e 0d 0d 0a 24 00 00 00 00 00 00 00 mode....$.......
0128 9d 68 ba 89 d9 09 d4 da d9 09 d4 da d9 09 d4 da .h..............
0144 d0 71 41 da d8 09 .qA... | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/utils/byte_word_values.py#L78-L153 | null | #!/usr/bin/env python
"""
DragonPy - Dragon 32 emulator in Python
=======================================
some code is borrowed from:
XRoar emulator by Ciaran Anscomb (GPL license) more info, see README
:copyleft: 2013-2014 by the DragonLib team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from __future__ import absolute_import, division, print_function
import string
import six
def signed5(x):
""" convert to signed 5-bit """
if x > 0xf: # 0xf == 2**4-1 == 15
x = x - 0x20 # 0x20 == 2**5 == 32
return x
def signed8(x):
""" convert to signed 8-bit """
if x > 0x7f: # 0x7f == 2**7-1 == 127
x = x - 0x100 # 0x100 == 2**8 == 256
return x
def unsigned8(x):
""" convert a signed 8-Bit value into a unsigned value """
if x < 0:
x = x + 0x0100 # 0x100 == 2**8 == 256
return x
def signed16(x):
""" convert to signed 16-bit """
if x > 0x7fff: # 0x7fff == 2**15-1 == 32767
x = x - 0x10000 # 0x100 == 2**16 == 65536
return x
def word2bytes(value):
"""
>>> word2bytes(0xff09)
(255, 9)
>>> [hex(i) for i in word2bytes(0xffab)]
['0xff', '0xab']
>>> word2bytes(0xffff +1)
Traceback (most recent call last):
...
AssertionError
"""
assert 0 <= value <= 0xffff
return (value >> 8, value & 0xff)
def bytes2word(byte_list):
"""
>>> bytes2word([0xff,0xab])
65451
>>> hex(bytes2word([0xff,0xab]))
'0xffab'
"""
assert len(byte_list) == 2
return (byte_list[0] << 8) + byte_list[1]
def _bin2hexline_example():
import sys
with open(sys.executable, "rb") as f:
data = f.read(500)
print("\n".join(bin2hexline(data, width=16)))
if __name__ == "__main__":
import doctest
print(doctest.testmod(verbose=0))
# _bin2hexline_example()
|
6809/dragonlib | dragonlib/core/binary_files.py | BinaryFile.load_DragonDosBinary | python | def load_DragonDosBinary(self, data, strip_padding=True):
data = bytearray(data)
log.debug("Load Dragon DOS Binary Format.")
meta_data = struct.unpack(">BBHHHB", data[:9])
machine_type = meta_data[0]
if machine_type != 0x55:
log.error("ERROR: Machine type wrong: is $%02X but should be $55!", machine_type)
self.file_type = meta_data[1]
self.load_address = meta_data[2]
self.length = meta_data[3]
self.exec_address = meta_data[4]
terminator = meta_data[5]
if terminator != 0xAA:
log.error("ERROR: Terminator byte is $%02X but should be $AA!", terminator)
# print("before strip:")
# print("\n".join(bin2hexline(data, width=16)))
if strip_padding:
self.data = data[9:self.length + 9]
else:
self.data = data[9:]
# print("after strip:")
# print("\n".join(bin2hexline(self.data, width=16)))
log.debug(
"File type: $%02X Load Address: $%04X Exec Address: $%04X Length: %iBytes",
self.file_type, self.load_address, self.exec_address, self.length
)
if self.length != len(self.data):
log.error("ERROR: Wrong data size: should be: %i Bytes but is %i Bytes!", self.length, len(self.data))
# log_bytes(self.data, "data in hex: %s", level=logging.DEBUG)
self.debug2log(level=logging.DEBUG) | Dragon DOS Binary Format
http://dragon32.info/info/binformt.html
Offset: Type: Value:
0 byte $55 Constant
1 byte Filetype
2:3 word Load Address
4:5 word Length
6:7 word Exec Address
8 byte $AA Constant
9-xxx byte[] Data | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/binary_files.py#L80-L130 | [
"def debug2log(self, level=logging.DEBUG):\n def verbose_value(value, fmt=\"$%02x\"):\n try:\n return fmt % value\n except TypeError:\n return repr(value)\n\n log.log(level, \"File Type: %s\", verbose_value(self.file_type))\n log.log(level, \"Load Address: %s\", verbose_value(self.load_address, fmt=\"$%04x\"))\n if self.length is None:\n log.log(level, \"Length: None\")\n else:\n log.log(level, \"Length: $%04x (dez.: %i Bytes)\",\n self.length, self.length\n )\n log.log(level, \"Exec Address: %s\", verbose_value(self.exec_address, fmt=\"$%04x\"))\n\n if not self.data:\n log.log(level, \"Data: %s\", repr(self.data))\n else:\n log.log(level, \"Data:\")\n for line in bin2hexline(self.data, width=16):\n log.log(level, line)\n"
] | class BinaryFile(object):
def __init__(self):
self.file_type = None # $01 == BAS | $02 == BIN
self.load_address = None
self.length = None
self.exec_address = None
self.data = None
def debug2log(self, level=logging.DEBUG):
def verbose_value(value, fmt="$%02x"):
try:
return fmt % value
except TypeError:
return repr(value)
log.log(level, "File Type: %s", verbose_value(self.file_type))
log.log(level, "Load Address: %s", verbose_value(self.load_address, fmt="$%04x"))
if self.length is None:
log.log(level, "Length: None")
else:
log.log(level, "Length: $%04x (dez.: %i Bytes)",
self.length, self.length
)
log.log(level, "Exec Address: %s", verbose_value(self.exec_address, fmt="$%04x"))
if not self.data:
log.log(level, "Data: %s", repr(self.data))
else:
log.log(level, "Data:")
for line in bin2hexline(self.data, width=16):
log.log(level, line)
def get_header(self):
header = struct.pack(">BBHHHB",
0x55,
self.file_type,
self.load_address,
self.length,
self.exec_address,
0xAA,
)
log_bytes(header, "Dragon DOS binary header in hex: %s", level=logging.DEBUG)
return header
def dump_DragonDosBinary(self):
# log_bytes(self.data, "data in hex: %s", level=logging.DEBUG)
self.debug2log(level=logging.DEBUG)
header = self.get_header()
if six.PY2:
return header + "".join([chr(i) for i in self.data])
return header + self.data
def load_from_bin(self, data):
"""
convert binary files to a ASCII basic string.
Supported are:
* Dragon DOS Binary Format
* CoCo DECB (Disk Extended Color BASIC) Format
see:
http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=348&p=10139#p10139
"""
data = bytearray(data)
machine_type = data[0]
# machine_type = struct.unpack("B", bin[0])[0]
if machine_type == 0x55:
# Dragon DOS Binary Format
self.load_DragonDosBinary(data)
elif machine_type == 0x00:
raise NotImplementedError("CoCo DECB (Disk Extended Color BASIC) Format not supported, yet.")
else:
raise NotImplementedError("ERROR: Format $%02X unknown." % machine_type)
def load_tokenised_dump(self, tokenised_dump, load_address, exec_address):
self.data = bytearray(tokenised_dump)
self.file_type = 0x01
self.load_address = load_address
self.length = len(self.data)
self.exec_address = exec_address |
6809/dragonlib | dragonlib/core/binary_files.py | BinaryFile.load_from_bin | python | def load_from_bin(self, data):
data = bytearray(data)
machine_type = data[0]
# machine_type = struct.unpack("B", bin[0])[0]
if machine_type == 0x55:
# Dragon DOS Binary Format
self.load_DragonDosBinary(data)
elif machine_type == 0x00:
raise NotImplementedError("CoCo DECB (Disk Extended Color BASIC) Format not supported, yet.")
else:
raise NotImplementedError("ERROR: Format $%02X unknown." % machine_type) | convert binary files to a ASCII basic string.
Supported are:
* Dragon DOS Binary Format
* CoCo DECB (Disk Extended Color BASIC) Format
see:
http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=348&p=10139#p10139 | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/binary_files.py#L132-L153 | [
"def load_DragonDosBinary(self, data, strip_padding=True):\n \"\"\"\n Dragon DOS Binary Format\n\n http://dragon32.info/info/binformt.html\n\n Offset: Type: Value:\n 0 byte $55 Constant\n 1 byte Filetype\n 2:3 word Load Address\n 4:5 word Length\n 6:7 word Exec Address\n 8 byte $AA Constant\n 9-xxx byte[] Data\n \"\"\"\n data = bytearray(data)\n\n log.debug(\"Load Dragon DOS Binary Format.\")\n\n meta_data = struct.unpack(\">BBHHHB\", data[:9])\n\n machine_type = meta_data[0]\n if machine_type != 0x55:\n log.error(\"ERROR: Machine type wrong: is $%02X but should be $55!\", machine_type)\n\n self.file_type = meta_data[1]\n self.load_address = meta_data[2]\n self.length = meta_data[3]\n self.exec_address = meta_data[4]\n terminator = meta_data[5]\n if terminator != 0xAA:\n log.error(\"ERROR: Terminator byte is $%02X but should be $AA!\", terminator)\n\n # print(\"before strip:\")\n # print(\"\\n\".join(bin2hexline(data, width=16)))\n if strip_padding:\n self.data = data[9:self.length + 9]\n else:\n self.data = data[9:]\n # print(\"after strip:\")\n # print(\"\\n\".join(bin2hexline(self.data, width=16)))\n\n log.debug(\n \"File type: $%02X Load Address: $%04X Exec Address: $%04X Length: %iBytes\",\n self.file_type, self.load_address, self.exec_address, self.length\n )\n if self.length != len(self.data):\n log.error(\"ERROR: Wrong data size: should be: %i Bytes but is %i Bytes!\", self.length, len(self.data))\n\n # log_bytes(self.data, \"data in hex: %s\", level=logging.DEBUG)\n self.debug2log(level=logging.DEBUG)\n"
] | class BinaryFile(object):
def __init__(self):
self.file_type = None # $01 == BAS | $02 == BIN
self.load_address = None
self.length = None
self.exec_address = None
self.data = None
def debug2log(self, level=logging.DEBUG):
def verbose_value(value, fmt="$%02x"):
try:
return fmt % value
except TypeError:
return repr(value)
log.log(level, "File Type: %s", verbose_value(self.file_type))
log.log(level, "Load Address: %s", verbose_value(self.load_address, fmt="$%04x"))
if self.length is None:
log.log(level, "Length: None")
else:
log.log(level, "Length: $%04x (dez.: %i Bytes)",
self.length, self.length
)
log.log(level, "Exec Address: %s", verbose_value(self.exec_address, fmt="$%04x"))
if not self.data:
log.log(level, "Data: %s", repr(self.data))
else:
log.log(level, "Data:")
for line in bin2hexline(self.data, width=16):
log.log(level, line)
def get_header(self):
header = struct.pack(">BBHHHB",
0x55,
self.file_type,
self.load_address,
self.length,
self.exec_address,
0xAA,
)
log_bytes(header, "Dragon DOS binary header in hex: %s", level=logging.DEBUG)
return header
def dump_DragonDosBinary(self):
# log_bytes(self.data, "data in hex: %s", level=logging.DEBUG)
self.debug2log(level=logging.DEBUG)
header = self.get_header()
if six.PY2:
return header + "".join([chr(i) for i in self.data])
return header + self.data
def load_DragonDosBinary(self, data, strip_padding=True):
"""
Dragon DOS Binary Format
http://dragon32.info/info/binformt.html
Offset: Type: Value:
0 byte $55 Constant
1 byte Filetype
2:3 word Load Address
4:5 word Length
6:7 word Exec Address
8 byte $AA Constant
9-xxx byte[] Data
"""
data = bytearray(data)
log.debug("Load Dragon DOS Binary Format.")
meta_data = struct.unpack(">BBHHHB", data[:9])
machine_type = meta_data[0]
if machine_type != 0x55:
log.error("ERROR: Machine type wrong: is $%02X but should be $55!", machine_type)
self.file_type = meta_data[1]
self.load_address = meta_data[2]
self.length = meta_data[3]
self.exec_address = meta_data[4]
terminator = meta_data[5]
if terminator != 0xAA:
log.error("ERROR: Terminator byte is $%02X but should be $AA!", terminator)
# print("before strip:")
# print("\n".join(bin2hexline(data, width=16)))
if strip_padding:
self.data = data[9:self.length + 9]
else:
self.data = data[9:]
# print("after strip:")
# print("\n".join(bin2hexline(self.data, width=16)))
log.debug(
"File type: $%02X Load Address: $%04X Exec Address: $%04X Length: %iBytes",
self.file_type, self.load_address, self.exec_address, self.length
)
if self.length != len(self.data):
log.error("ERROR: Wrong data size: should be: %i Bytes but is %i Bytes!", self.length, len(self.data))
# log_bytes(self.data, "data in hex: %s", level=logging.DEBUG)
self.debug2log(level=logging.DEBUG)
def load_tokenised_dump(self, tokenised_dump, load_address, exec_address):
    """
    Fill in the binary-file attributes from an already tokenised
    BASIC program dump (file type $01 == tokenised BASIC).
    """
    payload = bytearray(tokenised_dump)
    self.file_type = 0x01  # $01 == tokenised BASIC program
    self.load_address = load_address
    self.exec_address = exec_address
    self.data = payload
    self.length = len(payload)
6809/dragonlib | dragonlib/utils/iter_utils.py | list_replace | python | def list_replace(iterable, src, dst):
result=[]
iterable=list(iterable)
try:
dst=list(dst)
except TypeError: # e.g.: int
dst=[dst]
src=list(src)
src_len=len(src)
index = 0
while index < len(iterable):
element = iterable[index:index+src_len]
# print element, src
if element == src:
result += dst
index += src_len
else:
result.append(iterable[index])
index += 1
return result | Thanks to "EyDu":
http://www.python-forum.de/viewtopic.php?f=1&t=34539 (de)
>>> list_replace([1,2,3], (1,2), "X")
['X', 3]
>>> list_replace([1,2,3,4], (2,3), 9)
[1, 9, 4]
>>> list_replace([1,2,3], (2,), [9,8])
[1, 9, 8, 3]
>>> list_replace([1,2,3,4,5], (2,3,4), "X")
[1, 'X', 5]
>>> list_replace([1,2,3,4,5], (4,5), "X")
[1, 2, 3, 'X']
>>> list_replace([1,2,3,4,5], (1,2), "X")
['X', 3, 4, 5]
>>> list_replace([1,2,3,3,3,4,5], (3,3), "X")
[1, 2, 'X', 3, 4, 5]
>>> list_replace([1,2,3,3,3,4,5], (3,3), ("A","B","C"))
[1, 2, 'A', 'B', 'C', 3, 4, 5]
>>> list_replace((58, 131, 73, 70), (58, 131), 131)
[131, 73, 70] | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/utils/iter_utils.py#L24-L76 | null | #!/usr/bin/env python
# encoding:utf-8
"""
iter utilities
~~~~~~~~~~~~~~
:copyleft: 2014 by the DragonPy team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from __future__ import absolute_import, division, print_function
import six
# Obsolete if http://legacy.python.org/dev/peps/pep-0467/ merged:
if six.PY2:
iter_bytes = lambda data: (ord(b) for b in data)
else:
iter_bytes = iter
def list_replace(iterable, src, dst):
    """
    Return a new list with every occurrence of the sub-sequence *src*
    inside *iterable* replaced by *dst*.

    Thanks to "EyDu":
    http://www.python-forum.de/viewtopic.php?f=1&t=34539 (de)

    >>> list_replace([1,2,3], (1,2), "X")
    ['X', 3]
    >>> list_replace([1,2,3,4], (2,3), 9)
    [1, 9, 4]
    >>> list_replace([1,2,3], (2,), [9,8])
    [1, 9, 8, 3]
    >>> list_replace([1,2,3,4,5], (2,3,4), "X")
    [1, 'X', 5]
    >>> list_replace([1,2,3,4,5], (4,5), "X")
    [1, 2, 3, 'X']
    >>> list_replace([1,2,3,4,5], (1,2), "X")
    ['X', 3, 4, 5]
    >>> list_replace([1,2,3,3,3,4,5], (3,3), "X")
    [1, 2, 'X', 3, 4, 5]
    >>> list_replace([1,2,3,3,3,4,5], (3,3), ("A","B","C"))
    [1, 2, 'A', 'B', 'C', 3, 4, 5]
    >>> list_replace((58, 131, 73, 70), (58, 131), 131)
    [131, 73, 70]
    """
    items = list(iterable)
    pattern = list(src)
    try:
        replacement = list(dst)
    except TypeError:  # non-iterable replacement, e.g. an int
        replacement = [dst]

    pattern_len = len(pattern)
    output = []
    pos = 0
    while pos < len(items):
        if items[pos:pos + pattern_len] == pattern:
            output.extend(replacement)
            pos += pattern_len
        else:
            output.append(items[pos])
            pos += 1
    return output
if __name__ == "__main__":
import doctest
print(doctest.testmod()) |
6809/dragonlib | dragonlib/core/basic_parser.py | ParsedBASIC.pformat | python | def pformat(self):
'''
Manually pformat to force using """...""" and supress escaping apostrophe
'''
result = "{\n"
indent1 = " " * 4
indent2 = " " * 8
for line_no, code_objects in sorted(self.items()):
result += '%s%i: [\n' % (indent1, line_no)
for code_object in code_objects:
result += '%s"""<%s:%s>""",\n' % (
indent2, code_object.PART_TYPE, code_object.content
)
result += '%s],\n' % indent1
result += "}"
return result | Manually pformat to force using """...""" and supress escaping apostrophe | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/basic_parser.py#L65-L81 | null | class ParsedBASIC(dict):
"""
Normal dict with special __repr__
"""
def __repr__(self):
return self.pformat()
|
6809/dragonlib | dragonlib/core/basic_parser.py | BASICParser.parse | python | def parse(self, ascii_listing):
self.parsed_lines = ParsedBASIC()
for match in self.regex_line_no.finditer(ascii_listing):
log.info("_" * 79)
log.info("parse line >>>%r<<<", match.group())
line_no = int(match.group("no"))
line_content = match.group("content")
self.line_data = []
self._parse_code(line_content)
log.info("*** line %s result: %r", line_no, self.line_data)
self.parsed_lines[line_no] = self.line_data
return self.parsed_lines | parse the given ASCII BASIC listing.
Return a ParsedBASIC() instance. | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/basic_parser.py#L118-L136 | [
"def _parse_code(self, line):\n \"\"\"\n parse the given BASIC line and branch into DATA, String and\n consume a complete Comment\n \"\"\"\n log.debug(\"*** parse CODE: >>>%r<<<\", line)\n parts = self.regex_split_all.split(line, maxsplit=1)\n if len(parts) == 1: # end\n self.line_data.append(BASIC_Code(parts[0]))\n return\n pre, match, post = parts\n log.debug(\"\\tpre: >>>%r<<<\", pre)\n log.debug(\"\\tmatch: >>>%r<<<\", match)\n log.debug(\"\\tpost: >>>%r<<<\", post)\n\n if match == '\"':\n log.debug(\"%r --> parse STRING\", match)\n self.line_data.append(BASIC_Code(pre))\n string_part, rest = self._parse_string(post)\n self.line_data.append(BASIC_String(match + string_part))\n if rest:\n self._parse_code(rest)\n return\n\n self.line_data.append(BASIC_Code(pre + match))\n\n if match == \"DATA\":\n log.debug(\"%r --> parse DATA\", match)\n data_part, rest = self._parse_data(post)\n self.line_data.append(BASIC_Data(data_part))\n if rest:\n self._parse_code(rest)\n return\n elif match in (\"'\", \"REM\"):\n log.debug(\"%r --> consume rest of the line as COMMENT\", match)\n if post:\n self.line_data.append(BASIC_Comment(post))\n return\n\n raise RuntimeError(\"Wrong Reg.Exp.? match is: %r\" % match)\n"
] | class BASICParser(object):
"""
Split BASIC sourcecode into:
* line number
* Code parts
* DATA
* Strings
* Comments
"""
def __init__(self):
    """
    Pre-compile the regular expressions used by the parser.
    Note: re.VERBOSE makes the literal spaces in the patterns
    insignificant, so e.g. regex_split_string matches just a quote.
    """
    self.regex_line_no = re.compile(
        # Split the line number from the code
        "^\s*(?P<no>\d+)\s?(?P<content>.+)\s*$",
        re.MULTILINE
    )
    self.regex_split_all = re.compile(
        # To split a code line for parse CODE, DATA, STRING or COMMENT
        r""" ( " | DATA | REM | ') """,
        re.VERBOSE | re.MULTILINE
    )
    self.regex_split_data = re.compile(
        # To consume the complete DATA until " or :
        r""" ( " | : ) """,
        re.VERBOSE | re.MULTILINE
    )
    self.regex_split_string = re.compile(
        # To consume a string
        r""" ( " ) """,
        re.VERBOSE | re.MULTILINE
    )
def _parse_data(self, line, old_data=""):
    """
    Parse a DATA section until : or \n but exclude : in a string part.
    e.g.:
    10 DATA 1,"FOO:BAR",2:PRINT "NO DATA"

    Returns a (data_part, rest) tuple; *rest* is None when the whole
    line was consumed. Recurses via _parse_string to skip over quoted
    strings that may contain a colon.
    """
    log.debug("*** parse DATA: >>>%r<<< old data: >>>%r<<<", line, old_data)
    parts = self.regex_split_data.split(line, maxsplit=1)
    if len(parts) == 1:  # end
        # Neither " nor : found -> everything left belongs to the DATA part.
        return old_data + parts[0], None
    pre, match, post = parts
    log.debug("\tpre: >>>%r<<<", pre)
    pre = old_data + pre
    log.debug("\tmatch: >>>%r<<<", match)
    log.debug("\tpost: >>>%r<<<", post)
    if match == ":":
        # Statement separator ends the DATA part; hand the rest back.
        return old_data, match + post
    elif match == '"':
        # Consume the quoted string, then keep parsing DATA after it.
        string_part, rest = self._parse_string(post)
        return self._parse_data(rest, old_data=pre + match + string_part)
    raise RuntimeError("Wrong Reg.Exp.? match is: %r" % match)
def _parse_string(self, line):
    """
    Consume a string literal up to and including the next " character.

    Returns (string_part, rest); *rest* is None when no closing quote
    exists on this line.
    """
    log.debug("*** parse STRING: >>>%r<<<", line)
    fragments = self.regex_split_string.split(line, maxsplit=1)
    if len(fragments) == 1:  # no closing quote -> string runs to end of line
        return fragments[0], None
    before, quote, after = fragments
    log.debug("\tpre: >>>%r<<<", before)
    log.debug("\tmatch: >>>%r<<<", quote)
    log.debug("\tpost: >>>%r<<<", after)
    consumed = before + quote
    log.debug("Parse string result: %r,%r", consumed, after)
    return consumed, after
def _parse_code(self, line):
    """
    parse the given BASIC line and branch into DATA, String and
    consume a complete Comment

    Appends BASIC_Code / BASIC_String / BASIC_Data / BASIC_Comment
    objects to self.line_data; recurses on the remainder after each
    string or DATA section.
    """
    log.debug("*** parse CODE: >>>%r<<<", line)
    parts = self.regex_split_all.split(line, maxsplit=1)
    if len(parts) == 1:  # end
        # No ", DATA, REM or ' found -> the whole line is plain code.
        self.line_data.append(BASIC_Code(parts[0]))
        return
    pre, match, post = parts
    log.debug("\tpre: >>>%r<<<", pre)
    log.debug("\tmatch: >>>%r<<<", match)
    log.debug("\tpost: >>>%r<<<", post)
    if match == '"':
        log.debug("%r --> parse STRING", match)
        self.line_data.append(BASIC_Code(pre))
        string_part, rest = self._parse_string(post)
        self.line_data.append(BASIC_String(match + string_part))
        if rest:
            self._parse_code(rest)
        return
    # DATA / REM / ' keywords stay part of the code object.
    self.line_data.append(BASIC_Code(pre + match))
    if match == "DATA":
        log.debug("%r --> parse DATA", match)
        data_part, rest = self._parse_data(post)
        self.line_data.append(BASIC_Data(data_part))
        if rest:
            self._parse_code(rest)
        return
    elif match in ("'", "REM"):
        log.debug("%r --> consume rest of the line as COMMENT", match)
        if post:
            self.line_data.append(BASIC_Comment(post))
        return
    raise RuntimeError("Wrong Reg.Exp.? match is: %r" % match)
|
6809/dragonlib | dragonlib/core/basic_parser.py | BASICParser._parse_data | python | def _parse_data(self, line, old_data=""):
log.debug("*** parse DATA: >>>%r<<< old data: >>>%r<<<", line, old_data)
parts = self.regex_split_data.split(line, maxsplit=1)
if len(parts) == 1: # end
return old_data + parts[0], None
pre, match, post = parts
log.debug("\tpre: >>>%r<<<", pre)
pre = old_data + pre
log.debug("\tmatch: >>>%r<<<", match)
log.debug("\tpost: >>>%r<<<", post)
if match == ":":
return old_data, match + post
elif match == '"':
string_part, rest = self._parse_string(post)
return self._parse_data(rest, old_data=pre + match + string_part)
raise RuntimeError("Wrong Reg.Exp.? match is: %r" % match) | Parse a DATA section until : or \n but exclude : in a string part.
e.g.:
10 DATA 1,"FOO:BAR",2:PRINT "NO DATA" | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/basic_parser.py#L138-L160 | null | class BASICParser(object):
"""
Split BASIC sourcecode into:
* line number
* Code parts
* DATA
* Strings
* Comments
"""
def __init__(self):
self.regex_line_no = re.compile(
# Split the line number from the code
"^\s*(?P<no>\d+)\s?(?P<content>.+)\s*$",
re.MULTILINE
)
self.regex_split_all = re.compile(
# To split a code line for parse CODE, DATA, STRING or COMMENT
r""" ( " | DATA | REM | ') """,
re.VERBOSE | re.MULTILINE
)
self.regex_split_data = re.compile(
# To consume the complete DATA until " or :
r""" ( " | : ) """,
re.VERBOSE | re.MULTILINE
)
self.regex_split_string = re.compile(
# To consume a string
r""" ( " ) """,
re.VERBOSE | re.MULTILINE
)
def parse(self, ascii_listing):
    """
    parse the given ASCII BASIC listing.
    Return a ParsedBASIC() instance mapping each line number to the
    list of code objects produced by _parse_code().
    """
    self.parsed_lines = ParsedBASIC()
    for match in self.regex_line_no.finditer(ascii_listing):
        log.info("_" * 79)
        log.info("parse line >>>%r<<<", match.group())
        line_no = int(match.group("no"))
        line_content = match.group("content")
        # _parse_code() appends its results to self.line_data.
        self.line_data = []
        self._parse_code(line_content)
        log.info("*** line %s result: %r", line_no, self.line_data)
        self.parsed_lines[line_no] = self.line_data
    return self.parsed_lines
def _parse_string(self, line):
"""
Consume the complete string until next " or \n
"""
log.debug("*** parse STRING: >>>%r<<<", line)
parts = self.regex_split_string.split(line, maxsplit=1)
if len(parts) == 1: # end
return parts[0], None
pre, match, post = parts
log.debug("\tpre: >>>%r<<<", pre)
log.debug("\tmatch: >>>%r<<<", match)
log.debug("\tpost: >>>%r<<<", post)
pre = pre + match
log.debug("Parse string result: %r,%r", pre, post)
return pre, post
def _parse_code(self, line):
"""
parse the given BASIC line and branch into DATA, String and
consume a complete Comment
"""
log.debug("*** parse CODE: >>>%r<<<", line)
parts = self.regex_split_all.split(line, maxsplit=1)
if len(parts) == 1: # end
self.line_data.append(BASIC_Code(parts[0]))
return
pre, match, post = parts
log.debug("\tpre: >>>%r<<<", pre)
log.debug("\tmatch: >>>%r<<<", match)
log.debug("\tpost: >>>%r<<<", post)
if match == '"':
log.debug("%r --> parse STRING", match)
self.line_data.append(BASIC_Code(pre))
string_part, rest = self._parse_string(post)
self.line_data.append(BASIC_String(match + string_part))
if rest:
self._parse_code(rest)
return
self.line_data.append(BASIC_Code(pre + match))
if match == "DATA":
log.debug("%r --> parse DATA", match)
data_part, rest = self._parse_data(post)
self.line_data.append(BASIC_Data(data_part))
if rest:
self._parse_code(rest)
return
elif match in ("'", "REM"):
log.debug("%r --> consume rest of the line as COMMENT", match)
if post:
self.line_data.append(BASIC_Comment(post))
return
raise RuntimeError("Wrong Reg.Exp.? match is: %r" % match)
|
6809/dragonlib | dragonlib/core/basic_parser.py | BASICParser._parse_string | python | def _parse_string(self, line):
log.debug("*** parse STRING: >>>%r<<<", line)
parts = self.regex_split_string.split(line, maxsplit=1)
if len(parts) == 1: # end
return parts[0], None
pre, match, post = parts
log.debug("\tpre: >>>%r<<<", pre)
log.debug("\tmatch: >>>%r<<<", match)
log.debug("\tpost: >>>%r<<<", post)
pre = pre + match
log.debug("Parse string result: %r,%r", pre, post)
return pre, post | Consume the complete string until next " or \n | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/basic_parser.py#L162-L177 | null | class BASICParser(object):
"""
Split BASIC sourcecode into:
* line number
* Code parts
* DATA
* Strings
* Comments
"""
def __init__(self):
self.regex_line_no = re.compile(
# Split the line number from the code
"^\s*(?P<no>\d+)\s?(?P<content>.+)\s*$",
re.MULTILINE
)
self.regex_split_all = re.compile(
# To split a code line for parse CODE, DATA, STRING or COMMENT
r""" ( " | DATA | REM | ') """,
re.VERBOSE | re.MULTILINE
)
self.regex_split_data = re.compile(
# To consume the complete DATA until " or :
r""" ( " | : ) """,
re.VERBOSE | re.MULTILINE
)
self.regex_split_string = re.compile(
# To consume a string
r""" ( " ) """,
re.VERBOSE | re.MULTILINE
)
def parse(self, ascii_listing):
"""
parse the given ASCII BASIC listing.
Return a ParsedBASIC() instance.
"""
self.parsed_lines = ParsedBASIC()
for match in self.regex_line_no.finditer(ascii_listing):
log.info("_" * 79)
log.info("parse line >>>%r<<<", match.group())
line_no = int(match.group("no"))
line_content = match.group("content")
self.line_data = []
self._parse_code(line_content)
log.info("*** line %s result: %r", line_no, self.line_data)
self.parsed_lines[line_no] = self.line_data
return self.parsed_lines
def _parse_data(self, line, old_data=""):
"""
Parse a DATA section until : or \n but exclude : in a string part.
e.g.:
10 DATA 1,"FOO:BAR",2:PRINT "NO DATA"
"""
log.debug("*** parse DATA: >>>%r<<< old data: >>>%r<<<", line, old_data)
parts = self.regex_split_data.split(line, maxsplit=1)
if len(parts) == 1: # end
return old_data + parts[0], None
pre, match, post = parts
log.debug("\tpre: >>>%r<<<", pre)
pre = old_data + pre
log.debug("\tmatch: >>>%r<<<", match)
log.debug("\tpost: >>>%r<<<", post)
if match == ":":
return old_data, match + post
elif match == '"':
string_part, rest = self._parse_string(post)
return self._parse_data(rest, old_data=pre + match + string_part)
raise RuntimeError("Wrong Reg.Exp.? match is: %r" % match)
def _parse_code(self, line):
"""
parse the given BASIC line and branch into DATA, String and
consume a complete Comment
"""
log.debug("*** parse CODE: >>>%r<<<", line)
parts = self.regex_split_all.split(line, maxsplit=1)
if len(parts) == 1: # end
self.line_data.append(BASIC_Code(parts[0]))
return
pre, match, post = parts
log.debug("\tpre: >>>%r<<<", pre)
log.debug("\tmatch: >>>%r<<<", match)
log.debug("\tpost: >>>%r<<<", post)
if match == '"':
log.debug("%r --> parse STRING", match)
self.line_data.append(BASIC_Code(pre))
string_part, rest = self._parse_string(post)
self.line_data.append(BASIC_String(match + string_part))
if rest:
self._parse_code(rest)
return
self.line_data.append(BASIC_Code(pre + match))
if match == "DATA":
log.debug("%r --> parse DATA", match)
data_part, rest = self._parse_data(post)
self.line_data.append(BASIC_Data(data_part))
if rest:
self._parse_code(rest)
return
elif match in ("'", "REM"):
log.debug("%r --> consume rest of the line as COMMENT", match)
if post:
self.line_data.append(BASIC_Comment(post))
return
raise RuntimeError("Wrong Reg.Exp.? match is: %r" % match)
|
6809/dragonlib | dragonlib/core/basic_parser.py | BASICParser._parse_code | python | def _parse_code(self, line):
log.debug("*** parse CODE: >>>%r<<<", line)
parts = self.regex_split_all.split(line, maxsplit=1)
if len(parts) == 1: # end
self.line_data.append(BASIC_Code(parts[0]))
return
pre, match, post = parts
log.debug("\tpre: >>>%r<<<", pre)
log.debug("\tmatch: >>>%r<<<", match)
log.debug("\tpost: >>>%r<<<", post)
if match == '"':
log.debug("%r --> parse STRING", match)
self.line_data.append(BASIC_Code(pre))
string_part, rest = self._parse_string(post)
self.line_data.append(BASIC_String(match + string_part))
if rest:
self._parse_code(rest)
return
self.line_data.append(BASIC_Code(pre + match))
if match == "DATA":
log.debug("%r --> parse DATA", match)
data_part, rest = self._parse_data(post)
self.line_data.append(BASIC_Data(data_part))
if rest:
self._parse_code(rest)
return
elif match in ("'", "REM"):
log.debug("%r --> consume rest of the line as COMMENT", match)
if post:
self.line_data.append(BASIC_Comment(post))
return
raise RuntimeError("Wrong Reg.Exp.? match is: %r" % match) | parse the given BASIC line and branch into DATA, String and
consume a complete Comment | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/basic_parser.py#L179-L218 | [
"def _parse_data(self, line, old_data=\"\"):\n \"\"\"\n Parse a DATA section until : or \\n but exclude : in a string part.\n e.g.:\n 10 DATA 1,\"FOO:BAR\",2:PRINT \"NO DATA\"\n \"\"\"\n log.debug(\"*** parse DATA: >>>%r<<< old data: >>>%r<<<\", line, old_data)\n parts = self.regex_split_data.split(line, maxsplit=1)\n if len(parts) == 1: # end\n return old_data + parts[0], None\n\n pre, match, post = parts\n log.debug(\"\\tpre: >>>%r<<<\", pre)\n pre = old_data + pre\n log.debug(\"\\tmatch: >>>%r<<<\", match)\n log.debug(\"\\tpost: >>>%r<<<\", post)\n if match == \":\":\n return old_data, match + post\n elif match == '\"':\n string_part, rest = self._parse_string(post)\n return self._parse_data(rest, old_data=pre + match + string_part)\n\n raise RuntimeError(\"Wrong Reg.Exp.? match is: %r\" % match)\n",
"def _parse_string(self, line):\n \"\"\"\n Consume the complete string until next \" or \\n\n \"\"\"\n log.debug(\"*** parse STRING: >>>%r<<<\", line)\n parts = self.regex_split_string.split(line, maxsplit=1)\n if len(parts) == 1: # end\n return parts[0], None\n\n pre, match, post = parts\n log.debug(\"\\tpre: >>>%r<<<\", pre)\n log.debug(\"\\tmatch: >>>%r<<<\", match)\n log.debug(\"\\tpost: >>>%r<<<\", post)\n pre = pre + match\n log.debug(\"Parse string result: %r,%r\", pre, post)\n return pre, post\n",
"def _parse_code(self, line):\n \"\"\"\n parse the given BASIC line and branch into DATA, String and\n consume a complete Comment\n \"\"\"\n log.debug(\"*** parse CODE: >>>%r<<<\", line)\n parts = self.regex_split_all.split(line, maxsplit=1)\n if len(parts) == 1: # end\n self.line_data.append(BASIC_Code(parts[0]))\n return\n pre, match, post = parts\n log.debug(\"\\tpre: >>>%r<<<\", pre)\n log.debug(\"\\tmatch: >>>%r<<<\", match)\n log.debug(\"\\tpost: >>>%r<<<\", post)\n\n if match == '\"':\n log.debug(\"%r --> parse STRING\", match)\n self.line_data.append(BASIC_Code(pre))\n string_part, rest = self._parse_string(post)\n self.line_data.append(BASIC_String(match + string_part))\n if rest:\n self._parse_code(rest)\n return\n\n self.line_data.append(BASIC_Code(pre + match))\n\n if match == \"DATA\":\n log.debug(\"%r --> parse DATA\", match)\n data_part, rest = self._parse_data(post)\n self.line_data.append(BASIC_Data(data_part))\n if rest:\n self._parse_code(rest)\n return\n elif match in (\"'\", \"REM\"):\n log.debug(\"%r --> consume rest of the line as COMMENT\", match)\n if post:\n self.line_data.append(BASIC_Comment(post))\n return\n\n raise RuntimeError(\"Wrong Reg.Exp.? match is: %r\" % match)\n"
] | class BASICParser(object):
"""
Split BASIC sourcecode into:
* line number
* Code parts
* DATA
* Strings
* Comments
"""
def __init__(self):
self.regex_line_no = re.compile(
# Split the line number from the code
"^\s*(?P<no>\d+)\s?(?P<content>.+)\s*$",
re.MULTILINE
)
self.regex_split_all = re.compile(
# To split a code line for parse CODE, DATA, STRING or COMMENT
r""" ( " | DATA | REM | ') """,
re.VERBOSE | re.MULTILINE
)
self.regex_split_data = re.compile(
# To consume the complete DATA until " or :
r""" ( " | : ) """,
re.VERBOSE | re.MULTILINE
)
self.regex_split_string = re.compile(
# To consume a string
r""" ( " ) """,
re.VERBOSE | re.MULTILINE
)
def parse(self, ascii_listing):
"""
parse the given ASCII BASIC listing.
Return a ParsedBASIC() instance.
"""
self.parsed_lines = ParsedBASIC()
for match in self.regex_line_no.finditer(ascii_listing):
log.info("_" * 79)
log.info("parse line >>>%r<<<", match.group())
line_no = int(match.group("no"))
line_content = match.group("content")
self.line_data = []
self._parse_code(line_content)
log.info("*** line %s result: %r", line_no, self.line_data)
self.parsed_lines[line_no] = self.line_data
return self.parsed_lines
def _parse_data(self, line, old_data=""):
"""
Parse a DATA section until : or \n but exclude : in a string part.
e.g.:
10 DATA 1,"FOO:BAR",2:PRINT "NO DATA"
"""
log.debug("*** parse DATA: >>>%r<<< old data: >>>%r<<<", line, old_data)
parts = self.regex_split_data.split(line, maxsplit=1)
if len(parts) == 1: # end
return old_data + parts[0], None
pre, match, post = parts
log.debug("\tpre: >>>%r<<<", pre)
pre = old_data + pre
log.debug("\tmatch: >>>%r<<<", match)
log.debug("\tpost: >>>%r<<<", post)
if match == ":":
return old_data, match + post
elif match == '"':
string_part, rest = self._parse_string(post)
return self._parse_data(rest, old_data=pre + match + string_part)
raise RuntimeError("Wrong Reg.Exp.? match is: %r" % match)
def _parse_string(self, line):
"""
Consume the complete string until next " or \n
"""
log.debug("*** parse STRING: >>>%r<<<", line)
parts = self.regex_split_string.split(line, maxsplit=1)
if len(parts) == 1: # end
return parts[0], None
pre, match, post = parts
log.debug("\tpre: >>>%r<<<", pre)
log.debug("\tmatch: >>>%r<<<", match)
log.debug("\tpost: >>>%r<<<", post)
pre = pre + match
log.debug("Parse string result: %r,%r", pre, post)
return pre, post
|
6809/dragonlib | dragonlib/api.py | BaseAPI.program_dump2ascii_lines | python | def program_dump2ascii_lines(self, dump, program_start=None):
dump = bytearray(dump)
# assert isinstance(dump, bytearray)
if program_start is None:
program_start = self.DEFAULT_PROGRAM_START
return self.listing.program_dump2ascii_lines(dump, program_start) | convert a memory dump of a tokensized BASIC listing into
ASCII listing list. | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/api.py#L43-L53 | null | class BaseAPI(object):
RENUM_REGEX = r"""
(?P<statement> GOTO|GOSUB|THEN|ELSE ) (?P<space>\s*) (?P<no>[\d*,\s*]+)
"""
def __init__(self):
self.listing = BasicListing(self.BASIC_TOKENS)
self.renum_tool = RenumTool(self.RENUM_REGEX)
self.token_util = BasicTokenUtil(self.BASIC_TOKENS)
def parse_ascii_listing(self, basic_program_ascii):
    """
    Parse an ASCII BASIC listing via BASICParser into a mapping of
    line number -> parsed code objects.

    An empty parse result is logged as critical but still returned,
    so the caller decides how to handle it.
    """
    parser = BASICParser()
    parsed_lines = parser.parse(basic_program_ascii)
    if not parsed_lines:
        # Lazy logging args instead of eager %-formatting: the message is
        # only built if the record is actually emitted.
        log.critical("No parsed lines %r from %r ?!?",
            parsed_lines, basic_program_ascii)
    log.debug("Parsed BASIC: %r", parsed_lines)
    return parsed_lines
def ascii_listing2basic_lines(self, basic_program_ascii, program_start):
parsed_lines = self.parse_ascii_listing(basic_program_ascii)
basic_lines = []
for line_no, code_objects in sorted(parsed_lines.items()):
basic_line = BasicLine(self.token_util)
basic_line.code_objects_load(line_no,code_objects)
basic_lines.append(basic_line)
return basic_lines
def ascii_listing2program_dump(self, basic_program_ascii, program_start=None):
"""
convert a ASCII BASIC program listing into tokens.
This tokens list can be used to insert it into the
Emulator RAM.
"""
if program_start is None:
program_start = self.DEFAULT_PROGRAM_START
basic_lines = self.ascii_listing2basic_lines(basic_program_ascii, program_start)
program_dump=self.listing.basic_lines2program_dump(basic_lines, program_start)
assert isinstance(program_dump, bytearray), (
"is type: %s and not bytearray: %s" % (type(program_dump), repr(program_dump))
)
return program_dump
def pformat_tokens(self, tokens):
"""
format a tokenized BASIC program line. Useful for debugging.
returns a list of formated string lines.
"""
return self.listing.token_util.pformat_tokens(tokens)
def pformat_program_dump(self, program_dump, program_start=None):
"""
format a BASIC program dump. Useful for debugging.
returns a list of formated string lines.
"""
assert isinstance(program_dump, bytearray)
if program_start is None:
program_start = self.DEFAULT_PROGRAM_START
return self.listing.pformat_program_dump(program_dump, program_start)
def renum_ascii_listing(self, content):
return self.renum_tool.renum(content)
def reformat_ascii_listing(self, basic_program_ascii):
parsed_lines = self.parse_ascii_listing(basic_program_ascii)
ascii_lines = []
for line_no, code_objects in sorted(parsed_lines.items()):
print()
print(line_no, code_objects)
basic_line = BasicLine(self.token_util)
basic_line.code_objects_load(line_no,code_objects)
print(basic_line)
basic_line.reformat()
new_line = basic_line.get_content()
print(new_line)
ascii_lines.append(new_line)
return "\n".join(ascii_lines)
def bas2bin(self, basic_program_ascii, load_address=None, exec_address=None):
# FIXME: load_address/exec_address == program_start ?!?!
if load_address is None:
load_address = self.DEFAULT_PROGRAM_START
if exec_address is None:
exec_address = self.DEFAULT_PROGRAM_START
tokenised_dump = self.ascii_listing2program_dump(basic_program_ascii, load_address)
log.debug(type(tokenised_dump))
log.debug(repr(tokenised_dump))
log_bytes(tokenised_dump, msg="tokenised: %s")
binary_file = BinaryFile()
binary_file.load_tokenised_dump(tokenised_dump,
load_address=load_address,
exec_address=exec_address,
)
binary_file.debug2log(level=logging.CRITICAL)
data = binary_file.dump_DragonDosBinary()
return data
def bin2bas(self, data):
"""
convert binary files to a ASCII basic string.
Supported are:
* Dragon DOS Binary Format
* TODO: CoCo DECB (Disk Extended Color BASIC) Format
see:
http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=348&p=10139#p10139
"""
data = bytearray(data)
binary_file = BinaryFile()
binary_file.load_from_bin(data)
if binary_file.file_type != 0x01:
log.error("ERROR: file type $%02X is not $01 (tokenised BASIC)!", binary_file.file_type)
ascii_lines = self.program_dump2ascii_lines(dump=binary_file.data,
# FIXME:
#program_start=bin.exec_address
program_start=binary_file.load_address
)
return "\n".join(ascii_lines)
|
6809/dragonlib | dragonlib/api.py | BaseAPI.ascii_listing2program_dump | python | def ascii_listing2program_dump(self, basic_program_ascii, program_start=None):
if program_start is None:
program_start = self.DEFAULT_PROGRAM_START
basic_lines = self.ascii_listing2basic_lines(basic_program_ascii, program_start)
program_dump=self.listing.basic_lines2program_dump(basic_lines, program_start)
assert isinstance(program_dump, bytearray), (
"is type: %s and not bytearray: %s" % (type(program_dump), repr(program_dump))
)
return program_dump | convert a ASCII BASIC program listing into tokens.
This tokens list can be used to insert it into the
Emulator RAM. | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/api.py#L76-L91 | [
"def ascii_listing2basic_lines(self, basic_program_ascii, program_start):\n parsed_lines = self.parse_ascii_listing(basic_program_ascii)\n\n basic_lines = []\n for line_no, code_objects in sorted(parsed_lines.items()):\n basic_line = BasicLine(self.token_util)\n basic_line.code_objects_load(line_no,code_objects)\n basic_lines.append(basic_line)\n\n return basic_lines\n"
] | class BaseAPI(object):
RENUM_REGEX = r"""
(?P<statement> GOTO|GOSUB|THEN|ELSE ) (?P<space>\s*) (?P<no>[\d*,\s*]+)
"""
def __init__(self):
self.listing = BasicListing(self.BASIC_TOKENS)
self.renum_tool = RenumTool(self.RENUM_REGEX)
self.token_util = BasicTokenUtil(self.BASIC_TOKENS)
def program_dump2ascii_lines(self, dump, program_start=None):
    """
    convert a memory dump of a tokensized BASIC listing into
    ASCII listing list.

    *program_start* defaults to self.DEFAULT_PROGRAM_START when None.
    """
    dump = bytearray(dump)
    # assert isinstance(dump, bytearray)
    if program_start is None:
        program_start = self.DEFAULT_PROGRAM_START
    return self.listing.program_dump2ascii_lines(dump, program_start)
def parse_ascii_listing(self, basic_program_ascii):
parser = BASICParser()
parsed_lines = parser.parse(basic_program_ascii)
if not parsed_lines:
log.critical("No parsed lines %s from %s ?!?" % (
repr(parsed_lines), repr(basic_program_ascii)
))
log.debug("Parsed BASIC: %s", repr(parsed_lines))
return parsed_lines
def ascii_listing2basic_lines(self, basic_program_ascii, program_start):
    """
    Convert an ASCII BASIC listing into a sorted list of BasicLine
    instances.

    NOTE(review): *program_start* is accepted but never used here —
    confirm whether it can be dropped or should affect tokenising.
    """
    parsed_lines = self.parse_ascii_listing(basic_program_ascii)
    basic_lines = []
    for line_no, code_objects in sorted(parsed_lines.items()):
        basic_line = BasicLine(self.token_util)
        basic_line.code_objects_load(line_no,code_objects)
        basic_lines.append(basic_line)
    return basic_lines
def pformat_tokens(self, tokens):
    """
    Format a tokenized BASIC program line. Useful for debugging.
    Returns a list of formatted string lines.
    """
    return self.listing.token_util.pformat_tokens(tokens)
def pformat_program_dump(self, program_dump, program_start=None):
    """
    Format a BASIC program dump. Useful for debugging.
    Returns a list of formatted string lines.

    *program_start* defaults to self.DEFAULT_PROGRAM_START when None.
    """
    assert isinstance(program_dump, bytearray)
    if program_start is None:
        program_start = self.DEFAULT_PROGRAM_START
    return self.listing.pformat_program_dump(program_dump, program_start)
def renum_ascii_listing(self, content):
    """Renumber an ASCII BASIC listing via the configured RenumTool."""
    return self.renum_tool.renum(content)
def reformat_ascii_listing(self, basic_program_ascii):
parsed_lines = self.parse_ascii_listing(basic_program_ascii)
ascii_lines = []
for line_no, code_objects in sorted(parsed_lines.items()):
print()
print(line_no, code_objects)
basic_line = BasicLine(self.token_util)
basic_line.code_objects_load(line_no,code_objects)
print(basic_line)
basic_line.reformat()
new_line = basic_line.get_content()
print(new_line)
ascii_lines.append(new_line)
return "\n".join(ascii_lines)
def bas2bin(self, basic_program_ascii, load_address=None, exec_address=None):
# FIXME: load_address/exec_address == program_start ?!?!
if load_address is None:
load_address = self.DEFAULT_PROGRAM_START
if exec_address is None:
exec_address = self.DEFAULT_PROGRAM_START
tokenised_dump = self.ascii_listing2program_dump(basic_program_ascii, load_address)
log.debug(type(tokenised_dump))
log.debug(repr(tokenised_dump))
log_bytes(tokenised_dump, msg="tokenised: %s")
binary_file = BinaryFile()
binary_file.load_tokenised_dump(tokenised_dump,
load_address=load_address,
exec_address=exec_address,
)
binary_file.debug2log(level=logging.CRITICAL)
data = binary_file.dump_DragonDosBinary()
return data
def bin2bas(self, data):
    """
    convert binary files to a ASCII basic string.
    Supported are:
    * Dragon DOS Binary Format
    * TODO: CoCo DECB (Disk Extended Color BASIC) Format
    see:
    http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=348&p=10139#p10139
    """
    data = bytearray(data)
    binary_file = BinaryFile()
    binary_file.load_from_bin(data)
    # File type $01 == tokenised BASIC; other types are logged but
    # still passed on (no exception raised).
    if binary_file.file_type != 0x01:
        log.error("ERROR: file type $%02X is not $01 (tokenised BASIC)!", binary_file.file_type)
    ascii_lines = self.program_dump2ascii_lines(dump=binary_file.data,
        # FIXME:
        #program_start=bin.exec_address
        program_start=binary_file.load_address
    )
    return "\n".join(ascii_lines)
6809/dragonlib | dragonlib/api.py | BaseAPI.pformat_program_dump | python | def pformat_program_dump(self, program_dump, program_start=None):
assert isinstance(program_dump, bytearray)
if program_start is None:
program_start = self.DEFAULT_PROGRAM_START
return self.listing.pformat_program_dump(program_dump, program_start) | format a BASIC program dump. Useful for debugging.
returns a list of formated string lines. | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/api.py#L100-L109 | null | class BaseAPI(object):
RENUM_REGEX = r"""
(?P<statement> GOTO|GOSUB|THEN|ELSE ) (?P<space>\s*) (?P<no>[\d*,\s*]+)
"""
def __init__(self):
self.listing = BasicListing(self.BASIC_TOKENS)
self.renum_tool = RenumTool(self.RENUM_REGEX)
self.token_util = BasicTokenUtil(self.BASIC_TOKENS)
def program_dump2ascii_lines(self, dump, program_start=None):
"""
convert a memory dump of a tokensized BASIC listing into
ASCII listing list.
"""
dump = bytearray(dump)
# assert isinstance(dump, bytearray)
if program_start is None:
program_start = self.DEFAULT_PROGRAM_START
return self.listing.program_dump2ascii_lines(dump, program_start)
def parse_ascii_listing(self, basic_program_ascii):
parser = BASICParser()
parsed_lines = parser.parse(basic_program_ascii)
if not parsed_lines:
log.critical("No parsed lines %s from %s ?!?" % (
repr(parsed_lines), repr(basic_program_ascii)
))
log.debug("Parsed BASIC: %s", repr(parsed_lines))
return parsed_lines
def ascii_listing2basic_lines(self, basic_program_ascii, program_start):
parsed_lines = self.parse_ascii_listing(basic_program_ascii)
basic_lines = []
for line_no, code_objects in sorted(parsed_lines.items()):
basic_line = BasicLine(self.token_util)
basic_line.code_objects_load(line_no,code_objects)
basic_lines.append(basic_line)
return basic_lines
def ascii_listing2program_dump(self, basic_program_ascii, program_start=None):
"""
convert a ASCII BASIC program listing into tokens.
This tokens list can be used to insert it into the
Emulator RAM.
"""
if program_start is None:
program_start = self.DEFAULT_PROGRAM_START
basic_lines = self.ascii_listing2basic_lines(basic_program_ascii, program_start)
program_dump=self.listing.basic_lines2program_dump(basic_lines, program_start)
assert isinstance(program_dump, bytearray), (
"is type: %s and not bytearray: %s" % (type(program_dump), repr(program_dump))
)
return program_dump
def pformat_tokens(self, tokens):
"""
format a tokenized BASIC program line. Useful for debugging.
returns a list of formated string lines.
"""
return self.listing.token_util.pformat_tokens(tokens)
def renum_ascii_listing(self, content):
return self.renum_tool.renum(content)
def reformat_ascii_listing(self, basic_program_ascii):
parsed_lines = self.parse_ascii_listing(basic_program_ascii)
ascii_lines = []
for line_no, code_objects in sorted(parsed_lines.items()):
print()
print(line_no, code_objects)
basic_line = BasicLine(self.token_util)
basic_line.code_objects_load(line_no,code_objects)
print(basic_line)
basic_line.reformat()
new_line = basic_line.get_content()
print(new_line)
ascii_lines.append(new_line)
return "\n".join(ascii_lines)
def bas2bin(self, basic_program_ascii, load_address=None, exec_address=None):
# FIXME: load_address/exec_address == program_start ?!?!
if load_address is None:
load_address = self.DEFAULT_PROGRAM_START
if exec_address is None:
exec_address = self.DEFAULT_PROGRAM_START
tokenised_dump = self.ascii_listing2program_dump(basic_program_ascii, load_address)
log.debug(type(tokenised_dump))
log.debug(repr(tokenised_dump))
log_bytes(tokenised_dump, msg="tokenised: %s")
binary_file = BinaryFile()
binary_file.load_tokenised_dump(tokenised_dump,
load_address=load_address,
exec_address=exec_address,
)
binary_file.debug2log(level=logging.CRITICAL)
data = binary_file.dump_DragonDosBinary()
return data
def bin2bas(self, data):
"""
convert binary files to a ASCII basic string.
Supported are:
* Dragon DOS Binary Format
* TODO: CoCo DECB (Disk Extended Color BASIC) Format
see:
http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=348&p=10139#p10139
"""
data = bytearray(data)
binary_file = BinaryFile()
binary_file.load_from_bin(data)
if binary_file.file_type != 0x01:
log.error("ERROR: file type $%02X is not $01 (tokenised BASIC)!", binary_file.file_type)
ascii_lines = self.program_dump2ascii_lines(dump=binary_file.data,
# FIXME:
#program_start=bin.exec_address
program_start=binary_file.load_address
)
return "\n".join(ascii_lines)
|
6809/dragonlib | dragonlib/api.py | BaseAPI.bin2bas | python | def bin2bas(self, data):
data = bytearray(data)
binary_file = BinaryFile()
binary_file.load_from_bin(data)
if binary_file.file_type != 0x01:
log.error("ERROR: file type $%02X is not $01 (tokenised BASIC)!", binary_file.file_type)
ascii_lines = self.program_dump2ascii_lines(dump=binary_file.data,
# FIXME:
#program_start=bin.exec_address
program_start=binary_file.load_address
)
return "\n".join(ascii_lines) | convert binary files to a ASCII basic string.
Supported are:
* Dragon DOS Binary Format
* TODO: CoCo DECB (Disk Extended Color BASIC) Format
see:
http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=348&p=10139#p10139 | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/api.py#L156-L179 | [
"def program_dump2ascii_lines(self, dump, program_start=None):\n \"\"\"\n convert a memory dump of a tokensized BASIC listing into\n ASCII listing list.\n \"\"\"\n dump = bytearray(dump)\n # assert isinstance(dump, bytearray)\n\n if program_start is None:\n program_start = self.DEFAULT_PROGRAM_START\n return self.listing.program_dump2ascii_lines(dump, program_start)\n",
"def load_from_bin(self, data):\n \"\"\"\n convert binary files to a ASCII basic string.\n Supported are:\n * Dragon DOS Binary Format\n * CoCo DECB (Disk Extended Color BASIC) Format\n\n see:\n http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=348&p=10139#p10139\n \"\"\"\n data = bytearray(data)\n\n machine_type = data[0]\n\n # machine_type = struct.unpack(\"B\", bin[0])[0]\n if machine_type == 0x55:\n # Dragon DOS Binary Format\n self.load_DragonDosBinary(data)\n elif machine_type == 0x00:\n raise NotImplementedError(\"CoCo DECB (Disk Extended Color BASIC) Format not supported, yet.\")\n else:\n raise NotImplementedError(\"ERROR: Format $%02X unknown.\" % machine_type)\n"
] | class BaseAPI(object):
RENUM_REGEX = r"""
(?P<statement> GOTO|GOSUB|THEN|ELSE ) (?P<space>\s*) (?P<no>[\d*,\s*]+)
"""
def __init__(self):
self.listing = BasicListing(self.BASIC_TOKENS)
self.renum_tool = RenumTool(self.RENUM_REGEX)
self.token_util = BasicTokenUtil(self.BASIC_TOKENS)
def program_dump2ascii_lines(self, dump, program_start=None):
"""
convert a memory dump of a tokensized BASIC listing into
ASCII listing list.
"""
dump = bytearray(dump)
# assert isinstance(dump, bytearray)
if program_start is None:
program_start = self.DEFAULT_PROGRAM_START
return self.listing.program_dump2ascii_lines(dump, program_start)
def parse_ascii_listing(self, basic_program_ascii):
parser = BASICParser()
parsed_lines = parser.parse(basic_program_ascii)
if not parsed_lines:
log.critical("No parsed lines %s from %s ?!?" % (
repr(parsed_lines), repr(basic_program_ascii)
))
log.debug("Parsed BASIC: %s", repr(parsed_lines))
return parsed_lines
def ascii_listing2basic_lines(self, basic_program_ascii, program_start):
parsed_lines = self.parse_ascii_listing(basic_program_ascii)
basic_lines = []
for line_no, code_objects in sorted(parsed_lines.items()):
basic_line = BasicLine(self.token_util)
basic_line.code_objects_load(line_no,code_objects)
basic_lines.append(basic_line)
return basic_lines
def ascii_listing2program_dump(self, basic_program_ascii, program_start=None):
"""
convert a ASCII BASIC program listing into tokens.
This tokens list can be used to insert it into the
Emulator RAM.
"""
if program_start is None:
program_start = self.DEFAULT_PROGRAM_START
basic_lines = self.ascii_listing2basic_lines(basic_program_ascii, program_start)
program_dump=self.listing.basic_lines2program_dump(basic_lines, program_start)
assert isinstance(program_dump, bytearray), (
"is type: %s and not bytearray: %s" % (type(program_dump), repr(program_dump))
)
return program_dump
def pformat_tokens(self, tokens):
"""
format a tokenized BASIC program line. Useful for debugging.
returns a list of formated string lines.
"""
return self.listing.token_util.pformat_tokens(tokens)
def pformat_program_dump(self, program_dump, program_start=None):
"""
format a BASIC program dump. Useful for debugging.
returns a list of formated string lines.
"""
assert isinstance(program_dump, bytearray)
if program_start is None:
program_start = self.DEFAULT_PROGRAM_START
return self.listing.pformat_program_dump(program_dump, program_start)
def renum_ascii_listing(self, content):
return self.renum_tool.renum(content)
def reformat_ascii_listing(self, basic_program_ascii):
parsed_lines = self.parse_ascii_listing(basic_program_ascii)
ascii_lines = []
for line_no, code_objects in sorted(parsed_lines.items()):
print()
print(line_no, code_objects)
basic_line = BasicLine(self.token_util)
basic_line.code_objects_load(line_no,code_objects)
print(basic_line)
basic_line.reformat()
new_line = basic_line.get_content()
print(new_line)
ascii_lines.append(new_line)
return "\n".join(ascii_lines)
def bas2bin(self, basic_program_ascii, load_address=None, exec_address=None):
# FIXME: load_address/exec_address == program_start ?!?!
if load_address is None:
load_address = self.DEFAULT_PROGRAM_START
if exec_address is None:
exec_address = self.DEFAULT_PROGRAM_START
tokenised_dump = self.ascii_listing2program_dump(basic_program_ascii, load_address)
log.debug(type(tokenised_dump))
log.debug(repr(tokenised_dump))
log_bytes(tokenised_dump, msg="tokenised: %s")
binary_file = BinaryFile()
binary_file.load_tokenised_dump(tokenised_dump,
load_address=load_address,
exec_address=exec_address,
)
binary_file.debug2log(level=logging.CRITICAL)
data = binary_file.dump_DragonDosBinary()
return data
|
6809/dragonlib | dragonlib/utils/logging_utils.py | setup_logging | python | def setup_logging(level, logger_name=None, handler=None, log_formatter=None):
root_logger = logging.getLogger()
if logger_name is None:
logger = root_logger
root_logger.info("Set %i level to root logger", level)
else:
logger = logging.getLogger(logger_name)
root_logger.info("Set %i level to logger %r", level, logger_name)
if level == 100:
# Remove all existing handlers and set only NullHandler():
set_handler(logger, logging.NullHandler())
logger.disabled = True
return
logger.setLevel(level=level)
if log_formatter is None:
log_formatter = "%(relativeCreated)-5d %(levelname)8s %(module)13s %(lineno)d %(message)s"
formatter = logging.Formatter(log_formatter)
if handler is None:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
if hasattr(handler, "baseFilename"):
root_logger.debug("Log to file: %s (%s)", handler.baseFilename, repr(handler))
else:
root_logger.debug("Log to handler: %s", repr(handler))
# Remove all existing handlers and set only the given handler:
set_handler(logger, handler)
log.log(level, "Set logging to level %i %s", level, logging.getLevelName(level)) | levels:
1 - hardcode DEBUG ;)
10 - DEBUG
20 - INFO
30 - WARNING
40 - ERROR
50 - CRITICAL/FATAL
99 - nearly off
100 - complete off | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/utils/logging_utils.py#L55-L100 | [
"def set_handler(logger, handler):\n \"\"\"\n Remove all existing log handler and set\n only the given handler.\n \"\"\"\n logger.handlers = []\n logger.addHandler(handler)\n"
] | #!/usr/bin/env python
# encoding:utf-8
"""
loggin utilities
~~~~~~~~~~~~~~~~
:created: 2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2014 by the DragonPy team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from __future__ import absolute_import, division, print_function
import six
from dragonlib.utils.byte_word_values import bin2hexline
xrange = six.moves.xrange
import logging
import sys
log = logging.getLogger(__name__)
# log.critical("Log handlers: %s", repr(log.handlers))
# if len(log.handlers) > 1: # FIXME: tro avoid doublicated output
# log.handlers = (log.handlers[0],)
# log.critical("Fixed Log handlers: %s", repr(log.handlers))
def get_log_levels(additional_levels=[100,99]):
levels = additional_levels[:]
try:
# Python 3
levels += logging._nameToLevel.values()
except AttributeError:
# Python 2
levels += [level for level in logging._levelNames if isinstance(level, int)]
levels.sort()
return levels
LOG_LEVELS = get_log_levels()
def set_handler(logger, handler):
"""
Remove all existing log handler and set
only the given handler.
"""
logger.handlers = []
logger.addHandler(handler)
def log_memory_dump(memory, start, end, mem_info, level=99):
log.log(level, "Memory dump from $%04x to $%04x:", start, end)
for addr in xrange(start, end + 1):
value = memory[addr]
if isinstance(value, int):
msg = "$%04x: $%02x (dez: %i)" % (addr, value, value)
else:
msg = "$%04x: %s (is type: %s)" % (addr, repr(value), type(value))
msg = "%-25s| %s" % (
msg, mem_info.get_shortest(addr)
)
log.log(level, "\t%s", msg)
def pformat_hex_list(hex_list):
return " ".join(["$%x" % v for v in hex_list])
def pformat_byte_hex_list(hex_list):
return " ".join(["$%02x" % v for v in hex_list])
def pformat_word_hex_list(hex_list):
return " ".join(["$%02x" % v for v in hex_list])
def log_hexlist(byte_list, group=8, start=0x0000, level=99):
def _log(level, addr, line):
msg = pformat_byte_hex_list(line)
msg = "%04x - %s" % (addr, msg)
log.log(level, msg)
pos = 0
addr = start
line = []
for value in byte_list:
pos += 1
line.append(value)
if pos >= group:
_log(level, addr, line)
addr += pos
pos = 0
line = []
_log(level, addr, line)
def pformat_program_dump(ram_content):
msg = pformat_byte_hex_list(ram_content)
msg = msg.replace("$00 ", "\n$00\n")
return msg
def log_program_dump(ram_content, level=99):
msg = "BASIC program dump:\n"
msg += pformat_program_dump(ram_content)
log.log(level, msg)
def log_bytes(data, msg="%s", level=logging.DEBUG):
data = bytearray(data)
data = " ".join(["%02X" % item for item in data])
log.log(level, msg, data)
def log_hexlines(data, msg="Data:", level=logging.DEBUG, width=16):
log.log(level, msg)
for line in bin2hexline(data, width):
log.log(level, line)
def test_run():
import os
import subprocess
cmd_args = [
sys.executable,
os.path.join("..", "DragonPy_CLI.py"),
# "-h"
# "--log_list",
"--verbosity", "50",
"--log", "DragonPy.cpu6809,50;dragonpy.Dragon32.MC6821_PIA,40",
# "--verbosity", " 1", # hardcode DEBUG ;)
# "--verbosity", "10", # DEBUG
# "--verbosity", "20", # INFO
# "--verbosity", "30", # WARNING
# "--verbosity", "40", # ERROR
# "--verbosity", "50", # CRITICAL/FATAL
# "--verbosity", "99", # nearly all off
"--machine", "Dragon32", "run",
# "--machine", "Vectrex", "run",
# "--max_ops", "1",
# "--trace",
]
print("Startup CLI with: %s" % " ".join(cmd_args[1:]))
subprocess.Popen(cmd_args, cwd="..").wait()
if __name__ == "__main__":
dump = (0x1e, 0x07, 0x00, 0x0a, 0xa0, 0x00, 0x1e, 0x1a, 0x00, 0x14, 0x80, 0x20, 0x49, 0x20, 0xcb, 0x20, 0x30, 0x20, 0xbc, 0x20, 0x32, 0x35, 0x35, 0x3a, 0x00, 0x1e, 0x2d, 0x00, 0x1e, 0x93, 0x20, 0x31, 0x30, 0x32, 0x34, 0xc3, 0x28, 0x49, 0xc5, 0x32, 0x29, 0x2c, 0x49, 0x00, 0x1e, 0x35, 0x00, 0x28, 0x8b, 0x20, 0x49, 0x00, 0x1e, 0x4e, 0x00, 0x32, 0x49, 0x24, 0x20, 0xcb, 0x20, 0xff, 0x9a, 0x3a, 0x85, 0x20, 0x49, 0x24, 0xcb, 0x22, 0x22, 0x20, 0xbf, 0x20, 0x35, 0x30, 0x00, 0x00, 0x00)
log_hexlist(dump)
# log_hexlist(dump, group=4)
# log_hexlist(dump, group=5)
test_run()
|
6809/dragonlib | dragonlib/core/basic.py | BasicTokenUtil.ascii2token | python | def ascii2token(self, ascii_code, debug=False):
log.info(repr(ascii_code))
parts = self.regex.split(ascii_code)
log.info(repr(parts))
tokens = []
for part in parts:
if not part:
continue
if part in self.ascii2token_dict:
new_token = self.ascii2token_dict[part]
log.info("\t%r -> %x", part, new_token)
if new_token > 0xff:
tokens.append(new_token >> 8)
tokens.append(new_token & 0xff)
else:
tokens.append(new_token)
else:
tokens += self.chars2tokens(part)
return tokens | TODO: replace no tokens in comments and strings | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/basic.py#L75-L97 | [
"def chars2tokens(self, chars):\n return [ord(char) for char in chars]\n"
] | class BasicTokenUtil(object):
def __init__(self, basic_token_dict):
self.basic_token_dict = basic_token_dict
self.ascii2token_dict = dict([
(code, token)
for token, code in list(basic_token_dict.items())
])
regex = r"(%s)" % "|".join([
re.escape(statement)
for statement in sorted(list(self.basic_token_dict.values()), key=len, reverse=True)
])
self.regex = re.compile(regex)
def token2ascii(self, value):
try:
result = self.basic_token_dict[value]
except KeyError:
if value > 0xff:
log.info("ERROR: Token $%04x is not in BASIC_TOKENS!", value)
return ""
result = chr(value)
if six.PY2:
# Only for unittest, to avoid token representation as u"..."
# There is only ASCII characters possible
return str(result)
else:
return result
def tokens2ascii(self, values):
line=""
old_value = None
for value in values:
if value == 0xff:
old_value = value
continue
if old_value is not None:
value = (old_value << 8) + value
old_value = None
code = self.token2ascii(value)
line += code
return line
def chars2tokens(self, chars):
return [ord(char) for char in chars]
def code_objects2token(self, code_objects):
tokens = []
for code_object in code_objects:
if code_object.PART_TYPE == basic_parser.CODE_TYPE_CODE:
# Code part
content = code_object.content
"""
NOTE: The BASIC interpreter changed REM shortcut and ELSE
internaly:
"'" <-> ":'"
"ELSE" <-> ":ELSE"
See also:
http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4310&p=11632#p11630
"""
log.info("replace ' and ELSE with :' and :ELSE")
content = content.replace("'", ":'")
content = content.replace("ELSE", ":ELSE")
tokens += self.ascii2token(content)
else:
# Strings, Comments or DATA
tokens += self.chars2tokens(code_object.content)
return tokens
def iter_token_values(self, tokens):
token_value = None
for token in tokens:
if token == 0xff:
token_value = token
continue
if token_value is not None:
yield (token_value << 8) + token
token_value = None
else:
yield token
def pformat_tokens(self, tokens):
"""
format a tokenized BASIC program line. Useful for debugging.
returns a list of formated string lines.
"""
result = []
for token_value in self.iter_token_values(tokens):
char = self.token2ascii(token_value)
if token_value > 0xff:
result.append("\t$%04x -> %s" % (token_value, repr(char)))
else:
result.append("\t $%02x -> %s" % (token_value, repr(char)))
return result
|
6809/dragonlib | dragonlib/core/basic.py | BasicTokenUtil.pformat_tokens | python | def pformat_tokens(self, tokens):
result = []
for token_value in self.iter_token_values(tokens):
char = self.token2ascii(token_value)
if token_value > 0xff:
result.append("\t$%04x -> %s" % (token_value, repr(char)))
else:
result.append("\t $%02x -> %s" % (token_value, repr(char)))
return result | format a tokenized BASIC program line. Useful for debugging.
returns a list of formated string lines. | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/basic.py#L136-L149 | [
"def token2ascii(self, value):\n try:\n result = self.basic_token_dict[value]\n except KeyError:\n if value > 0xff:\n log.info(\"ERROR: Token $%04x is not in BASIC_TOKENS!\", value)\n return \"\"\n result = chr(value)\n if six.PY2:\n # Only for unittest, to avoid token representation as u\"...\"\n # There is only ASCII characters possible\n return str(result)\n else:\n return result\n",
"def iter_token_values(self, tokens):\n token_value = None\n for token in tokens:\n if token == 0xff:\n token_value = token\n continue\n\n if token_value is not None:\n yield (token_value << 8) + token\n token_value = None\n else:\n yield token\n"
] | class BasicTokenUtil(object):
def __init__(self, basic_token_dict):
self.basic_token_dict = basic_token_dict
self.ascii2token_dict = dict([
(code, token)
for token, code in list(basic_token_dict.items())
])
regex = r"(%s)" % "|".join([
re.escape(statement)
for statement in sorted(list(self.basic_token_dict.values()), key=len, reverse=True)
])
self.regex = re.compile(regex)
def token2ascii(self, value):
try:
result = self.basic_token_dict[value]
except KeyError:
if value > 0xff:
log.info("ERROR: Token $%04x is not in BASIC_TOKENS!", value)
return ""
result = chr(value)
if six.PY2:
# Only for unittest, to avoid token representation as u"..."
# There is only ASCII characters possible
return str(result)
else:
return result
def tokens2ascii(self, values):
line=""
old_value = None
for value in values:
if value == 0xff:
old_value = value
continue
if old_value is not None:
value = (old_value << 8) + value
old_value = None
code = self.token2ascii(value)
line += code
return line
def chars2tokens(self, chars):
return [ord(char) for char in chars]
def ascii2token(self, ascii_code, debug=False):
"""
TODO: replace no tokens in comments and strings
"""
log.info(repr(ascii_code))
parts = self.regex.split(ascii_code)
log.info(repr(parts))
tokens = []
for part in parts:
if not part:
continue
if part in self.ascii2token_dict:
new_token = self.ascii2token_dict[part]
log.info("\t%r -> %x", part, new_token)
if new_token > 0xff:
tokens.append(new_token >> 8)
tokens.append(new_token & 0xff)
else:
tokens.append(new_token)
else:
tokens += self.chars2tokens(part)
return tokens
def code_objects2token(self, code_objects):
tokens = []
for code_object in code_objects:
if code_object.PART_TYPE == basic_parser.CODE_TYPE_CODE:
# Code part
content = code_object.content
"""
NOTE: The BASIC interpreter changed REM shortcut and ELSE
internaly:
"'" <-> ":'"
"ELSE" <-> ":ELSE"
See also:
http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4310&p=11632#p11630
"""
log.info("replace ' and ELSE with :' and :ELSE")
content = content.replace("'", ":'")
content = content.replace("ELSE", ":ELSE")
tokens += self.ascii2token(content)
else:
# Strings, Comments or DATA
tokens += self.chars2tokens(code_object.content)
return tokens
def iter_token_values(self, tokens):
token_value = None
for token in tokens:
if token == 0xff:
token_value = token
continue
if token_value is not None:
yield (token_value << 8) + token
token_value = None
else:
yield token
|
6809/dragonlib | dragonlib/core/basic.py | BasicLine.token_load | python | def token_load(self, line_number, tokens):
self.line_number = line_number
assert tokens[-1] == 0x00, "line code %s doesn't ends with \\x00: %s" % (
repr(tokens), repr(tokens[-1])
)
for src, dst in self.tokens_replace_rules:
log.info("Relace tokens %s with $%02x",
pformat_byte_hex_list(src), dst
)
log.debug("Before..: %s", pformat_byte_hex_list(tokens))
tokens = list_replace(tokens, src, dst)
log.debug("After...: %s", pformat_byte_hex_list(tokens))
self.line_code = tokens[:-1] | NOTE: The BASIC interpreter changed REM shortcut and ELSE
internaly:
"'" <-> ":'"
"ELSE" <-> ":ELSE"
See also:
http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4310&p=11632#p11630 | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/basic.py#L169-L192 | [
"def list_replace(iterable, src, dst):\n \"\"\"\n Thanks to \"EyDu\":\n http://www.python-forum.de/viewtopic.php?f=1&t=34539 (de)\n\n >>> list_replace([1,2,3], (1,2), \"X\")\n ['X', 3]\n\n >>> list_replace([1,2,3,4], (2,3), 9)\n [1, 9, 4]\n\n >>> list_replace([1,2,3], (2,), [9,8])\n [1, 9, 8, 3]\n\n >>> list_replace([1,2,3,4,5], (2,3,4), \"X\")\n [1, 'X', 5]\n\n >>> list_replace([1,2,3,4,5], (4,5), \"X\")\n [1, 2, 3, 'X']\n\n >>> list_replace([1,2,3,4,5], (1,2), \"X\")\n ['X', 3, 4, 5]\n\n >>> list_replace([1,2,3,3,3,4,5], (3,3), \"X\")\n [1, 2, 'X', 3, 4, 5]\n\n >>> list_replace([1,2,3,3,3,4,5], (3,3), (\"A\",\"B\",\"C\"))\n [1, 2, 'A', 'B', 'C', 3, 4, 5]\n\n >>> list_replace((58, 131, 73, 70), (58, 131), 131)\n [131, 73, 70]\n \"\"\"\n result=[]\n iterable=list(iterable)\n\n try:\n dst=list(dst)\n except TypeError: # e.g.: int\n dst=[dst]\n\n src=list(src)\n src_len=len(src)\n index = 0\n while index < len(iterable):\n element = iterable[index:index+src_len]\n# print element, src\n if element == src:\n result += dst\n index += src_len\n else:\n result.append(iterable[index])\n index += 1\n return result\n",
"def pformat_byte_hex_list(hex_list):\n return \" \".join([\"$%02x\" % v for v in hex_list])\n"
] | class BasicLine(object):
def __init__(self, token_util):
self.token_util = token_util
self.line_number = None
self.line_code = None
try:
colon_token = self.token_util.ascii2token_dict[":"]
except KeyError: # XXX: Always not defined as token?
colon_token = ord(":")
rem_token = self.token_util.ascii2token_dict["'"]
else_token = self.token_util.ascii2token_dict["ELSE"]
self.tokens_replace_rules = (
((colon_token, rem_token), rem_token),
((colon_token, else_token), else_token),
)
# rstrip \x00
def ascii_load(self, line_ascii):
try:
line_number, ascii_code = line_ascii.split(" ", 1)
except ValueError as err:
msg = "Error split line number and code in line: %r (Origin error: %s)" % (
line_ascii, err
)
raise ValueError(msg)
self.line_number = int(line_number)
self.line_code = self.token_util.ascii2token(ascii_code)
def code_objects_load(self, line_number, code_objects):
self.line_number = line_number
self.line_code = self.token_util.code_objects2token(code_objects)
def get_tokens(self):
"""
return two bytes line number + the code
"""
return list(word2bytes(self.line_number)) + self.line_code
def reformat(self):
# TODO: Use BASICParser to exclude string/comments etc.
space = self.token_util.ascii2token(" ")[0]
to_split=self.token_util.basic_token_dict.copy()
dont_split_tokens=self.token_util.ascii2token(":()+-*/^<=>")
for token_value in dont_split_tokens:
try:
del(to_split[token_value])
except KeyError: # e.g.: () are not tokens
pass
tokens=tuple(self.token_util.iter_token_values(self.line_code))
temp = []
was_token=False
for no, token in enumerate(tokens):
try:
next_token=tokens[no+1]
except IndexError:
next_token=None
if token in to_split:
log.debug("X%sX" % to_split[token])
try:
if temp[-1]!=space:
temp.append(space)
except IndexError:
pass
temp.append(token)
if not (next_token and next_token in dont_split_tokens):
temp.append(space)
was_token=True
else:
if was_token and token==space:
was_token=False
continue
log.debug("Y%rY" % self.token_util.tokens2ascii([token]))
temp.append(token)
temp = list_replace(temp, self.token_util.ascii2token("GO TO"), self.token_util.ascii2token("GOTO"))
temp = list_replace(temp, self.token_util.ascii2token("GO SUB"), self.token_util.ascii2token("GOSUB"))
temp = list_replace(temp, self.token_util.ascii2token(": "), self.token_util.ascii2token(":"))
temp = list_replace(temp, self.token_util.ascii2token("( "), self.token_util.ascii2token("("))
temp = list_replace(temp, self.token_util.ascii2token(", "), self.token_util.ascii2token(","))
self.line_code = temp
def get_content(self, code=None):
if code is None: # start
code = self.line_code
line = "%i " % self.line_number
line += self.token_util.tokens2ascii(code)
return line
def __repr__(self):
return "%r: %s" % (self.get_content(), " ".join(["$%02x" % t for t in self.line_code]))
def log_line(self):
log.critical("%r:\n\t%s",
self.get_content(),
"\n\t".join(self.token_util.pformat_tokens(self.line_code))
)
|
6809/dragonlib | dragonlib/core/basic.py | BasicListing.pformat_program_dump | python | def pformat_program_dump(self, program_dump, program_start, formated_dump=None):
if formated_dump is None:
formated_dump = []
formated_dump.append(
"program start address: $%04x" % program_start
)
assert isinstance(program_dump, bytearray)
if not program_dump:
return program_dump
try:
next_address = (program_dump[0] << 8) + program_dump[1]
except IndexError as err:
raise IndexError(
"Can't get next address from: %s program start: $%04x (Origin error: %s)" % (
repr(program_dump), program_start, err
))
if next_address == 0x0000:
formated_dump.append("$%04x -> end address" % next_address)
return formated_dump
assert next_address > program_start, "Next address $%04x not bigger than program start $%04x ?!?" % (
next_address, program_start
)
length = next_address - program_start
formated_dump.append(
"$%04x -> next address (length: %i)" % (next_address, length)
)
line_number = (program_dump[2] << 8) + program_dump[3]
formated_dump.append("$%04x -> %i (line number)" % (line_number, line_number))
tokens = program_dump[4:length]
formated_dump.append("tokens:")
formated_dump += self.token_util.pformat_tokens(tokens)
return self.pformat_program_dump(program_dump[length:], next_address, formated_dump) | format a BASIC program dump. Useful for debugging.
returns a list of formated string lines. | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/basic.py#L353-L396 | null | class BasicListing(object):
def __init__(self, basic_token_dict):
self.token_util = BasicTokenUtil(basic_token_dict)
def dump2basic_lines(self, dump, program_start, basic_lines=None):
if basic_lines is None:
basic_lines = []
log.debug("progam start $%04x", program_start)
try:
next_address = (dump[0] << 8) + dump[1]
except IndexError as err:
log.debug("Can't get address: %s", err)
return basic_lines
log.debug("next_address: $%04x", next_address)
if next_address == 0x0000:
# program end
log.debug("return: %s", repr(basic_lines))
return basic_lines
assert next_address > program_start, "Next address $%04x not bigger than program start $%04x ?!?" % (
next_address, program_start
)
line_number = (dump[2] << 8) + dump[3]
log.debug("line_number: %i", line_number)
length = next_address - program_start
log.debug("length: %i", length)
tokens = dump[4:length]
log.debug("tokens:\n\t%s", "\n\t".join(self.token_util.pformat_tokens(tokens)))
basic_line = BasicLine(self.token_util)
basic_line.token_load(line_number, tokens)
basic_lines.append(basic_line)
return self.dump2basic_lines(dump[length:], next_address, basic_lines)
def basic_lines2program_dump(self, basic_lines, program_start):
program_dump = bytearray()
current_address = program_start
count = len(basic_lines)
for no, line in enumerate(basic_lines, 1):
line.log_line()
line_tokens = line.get_tokens() + [0x00]
current_address += len(line_tokens) + 2
current_address_bytes = word2bytes(current_address) # e.g.: word2bytes(0xff09) -> (255, 9)
program_dump += bytearray(current_address_bytes)
if no == count: # It's the last line
line_tokens += [0x00, 0x00]
program_dump += bytearray(line_tokens)
return program_dump
def ascii_listing2basic_lines(self, txt):
basic_lines = []
for line in txt.splitlines():
line = line.strip()
if line:
basic_line = BasicLine(self.token_util)
basic_line.ascii_load(line)
basic_lines.append(basic_line)
return basic_lines
def debug_listing(self, basic_lines):
for line in basic_lines:
line.log_line()
def log_ram_content(self, program_start, level=99):
ram_content = self.basic_lines2program_dump(program_start)
log_program_dump(ram_content, level)
def ascii_listing2program_dump(self, basic_program_ascii, program_start):
basic_lines = self.ascii_listing2basic_lines(basic_program_ascii)
self.debug_listing(basic_lines)
return self.basic_lines2program_dump(basic_lines, program_start)
# def parsed_lines2program_dump(self, parsed_lines, program_start):
# for line_no, code_objects in sorted(parsed_lines.items()):
# for code_object in code_objects:
def program_dump2ascii_lines(self, dump, program_start):
basic_lines = self.dump2basic_lines(dump, program_start)
log.info("basic_lines: %s", repr(basic_lines))
ascii_lines = []
for line in basic_lines:
ascii_lines.append(line.get_content())
return ascii_lines
|
6809/dragonlib | dragonlib/core/basic.py | RenumTool.get_destinations | python | def get_destinations(self, ascii_listing):
self.destinations = set()
def collect_destinations(matchobj):
numbers = matchobj.group("no")
if numbers:
self.destinations.update(set(
[n.strip() for n in numbers.split(",")]
))
for line in self._iter_lines(ascii_listing):
self.renum_regex.sub(collect_destinations, line)
return sorted([int(no) for no in self.destinations if no]) | returns all line numbers that are used in a jump. | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/basic.py#L451-L466 | [
"def _iter_lines(self, ascii_listing):\n lines = ascii_listing.splitlines()\n lines = [line.strip() for line in lines if line.strip()]\n for line in lines:\n yield line\n"
] | class RenumTool(object):
"""
Renumber a BASIC program
"""
def __init__(self, renum_regex):
self.line_no_regex = re.compile("(?P<no>\d+)(?P<code>.+)")
self.renum_regex = re.compile(renum_regex, re.VERBOSE)
def renum(self, ascii_listing):
self.renum_dict = self.create_renum_dict(ascii_listing)
log.info("renum: %s",
", ".join([
"%s->%s" % (o, n)
for o, n in sorted(self.renum_dict.items())
])
)
new_listing = []
for new_number, line in enumerate(self._iter_lines(ascii_listing), 1):
new_number *= 10
line = self.line_no_regex.sub("%s\g<code>" % new_number, line)
new_line = self.renum_regex.sub(self.renum_inline, line)
log.debug("%r -> %r", line, new_line)
new_listing.append(new_line)
return "\n".join(new_listing)
def _iter_lines(self, ascii_listing):
lines = ascii_listing.splitlines()
lines = [line.strip() for line in lines if line.strip()]
for line in lines:
yield line
def _get_new_line_number(self, line, old_number):
try:
new_number = "%s" % self.renum_dict[old_number]
except KeyError:
log.error(
"Error in line '%s': line no. '%s' doesn't exist.",
line, old_number
)
new_number = old_number
return new_number
def renum_inline(self, matchobj):
# log.critical(matchobj.groups())
old_numbers = matchobj.group("no")
if old_numbers[-1] == " ":
# e.g.: space before comment: ON X GOTO 1,2 ' Comment
space_after = " "
else:
space_after = ""
old_numbers = [n.strip() for n in old_numbers.split(",")]
new_numbers = [
self._get_new_line_number(matchobj.group(0), old_number)
for old_number in old_numbers
]
return "".join([
matchobj.group("statement"),
matchobj.group("space"),
",".join(new_numbers), space_after
])
def create_renum_dict(self, ascii_listing):
old_numbers = [match[0] for match in self.line_no_regex.findall(ascii_listing)]
renum_dict = {}
for new_number, old_number in enumerate(old_numbers, 1):
new_number *= 10
renum_dict[old_number] = new_number
return renum_dict
|
6809/dragonlib | dragonlib/utils/auto_shift.py | invert_shift | python | def invert_shift(chars):
result = ""
for char in chars:
if char in string.ascii_lowercase:
# log.critical("auto shift lowercase char %s to UPPERCASE", repr(char))
char = char.upper()
elif char in string.ascii_uppercase:
# log.critical("auto shift UPPERCASE char %s to lowercase", repr(char))
char = char.lower()
result += char
return result | >>> invert_shift("a")
'A'
>>> invert_shift("A")
'a'
>>> invert_shift("123 foo 456 BAR #!")
'123 FOO 456 bar #!' | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/utils/auto_shift.py#L17-L36 | null | # encoding:utf8
"""
DragonPy - Dragon 32 emulator in Python
=======================================
:created: 2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2014 by the DragonPy team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from __future__ import absolute_import, division, print_function
import string
if __name__ == '__main__':
import doctest
print(doctest.testmod(
# verbose=1
))
|
6809/dragonlib | dragonlib/dragon32/pygments_lexer.py | list_styles | python | def list_styles(style_name):
style = get_style_by_name(style_name)
keys = list(style)[0][1]
Styles = namedtuple("Style", keys)
existing_styles = {}
for ttype, ndef in style:
s = Styles(**ndef)
if s in existing_styles:
existing_styles[s].append(ttype)
else:
existing_styles[s] = [ttype]
for ndef, ttypes in existing_styles.items():
print(ndef)
for ttype in sorted(ttypes):
print("\t%s" % str(ttype).split("Token.",1)[1]) | Just list all different styles entries | train | https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/dragon32/pygments_lexer.py#L87-L108 | null | # encoding:utf8
"""
DragonLib - needful python modules for Dragon/CoCo stuff
========================================================
:created: 2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2014 by the DragonLib team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from __future__ import absolute_import, division, print_function
import re
from collections import namedtuple
from pygments.lexer import RegexLexer
from pygments.styles import get_style_by_name
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
class BasicLexer(RegexLexer):
"""
Pygments lexer for Dragon/CoCo BASIC
"""
name = 'Dragon/CoCo BASIC'
aliases = ['basic']
filenames = ['*.bas']
tokens = {
'root': [
(r"(REM|').*\n", Comment.Single),
(r'\s+', Text),
(r'^\d+', Name.Label),
(
r'RUN|RESTORE|STOP|RENUM|'
r'GOTO|'
r'OPEN|CLOSE|READ|CLOAD|CSAVE|DLOAD|LLIST|MOTOR|SKIPF|'
r'LIST|CLEAR|NEW|EXEC|DEL|EDIT|TRON|TROFF',
Keyword
),
(
r'SOUND|AUDIOLINE|PLAY|'
r'PCLS|PSET|SCREEN|PCLEAR|COLOR|CIRCLE|PAINT|GET|PUT|DRAW|PCOPY|PMODE',
Keyword.Reserved
),
(r'DATA|DIM|LET|DEF', Keyword.Declaration),
(
r'PRINT|CLS|INPUT|INKEY$|'
r'HEX$|LEFT$|RIGHT$|MID$|STRING$|STR$|CHR$|'
r'SGN|INT|ABS|POS|RND|SQR|LOG|EXP|SIN|COS|TAN|ATN|LEN|VAL|ASC',
Name.Builtin
),
(
r'FOR|TO|STEP|NEXT|IF|THEN|ELSE|RETURN|'
r'GOSUB|'
r'POKE|PEEK|'
r'ON|END|CONT|SET|RESET|PRESET|TAB|SUB|FN|OFF|'
r'USING|EOF|JOYSTK|FIX|POINT|MEM|VARPTR|INSTR|TIMER|PPOINT|USR',
Name.Function
),
(r'([+\-*/^>=<])', Operator),
(r'AND|OR|NOT', Operator.Word),
(r'"[^"\n]*.', String),
(r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float),
(r'[(),:]', Punctuation),
(r'\w+[$%]?', Name),
]
}
def analyse_text(self, text):
# if it starts with a line number, it shouldn't be a "modern" Basic
# like VB.net
if re.match(r'\d+', text):
return 0.2
if __name__ == "__main__":
list_styles("default") |
luismasuelli/python-cantrips | cantrips/patterns/snapshot.py | can_take | python | def can_take(attrs_to_freeze=(), defaults=None, source_attr='source', instance_property_name='snapshot', inner_class_name='Snapshot'):
def wrapper(klass):
Snapshot = namedtuple(inner_class_name, tuple(attrs_to_freeze) + (source_attr,))
doc = """
From the current instance collects the following attributes:
%s
Additionally, using the attribute '%s', collects a reference
to the current instance.
""" % (', '.join(attrs_to_freeze), source_attr)
def instance_method(self):
return Snapshot(**dict({
k: (getattr(self, k, defaults(k)) if callable(defaults) else getattr(self, k)) for k in attrs_to_freeze
}, **{source_attr: self}))
instance_method.__doc__ = doc
setattr(klass, instance_property_name, property(instance_method))
setattr(klass, inner_class_name, Snapshot)
return klass
return wrapper | Decorator to make a class allow their instances to generate
snapshot of themselves.
Decorates the class by allowing it to have:
* A custom class to serve each snapshot. Such class
will have a subset of attributes to serve from the object,
and a special designed attribute ('source', by default) to
serve the originating object. Such class will be stored
under custom name under the generating (decorated) class.
* An instance method (actually: property) which will yield
the snapshot for the instance. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/snapshot.py#L4-L39 | null | from collections import namedtuple
|
luismasuelli/python-cantrips | cantrips/console.py | input | python | def input(message, until, single_char=False, transform=lambda a: a):
def _getch(message):
print(message, end='')
return getch()
while True:
text = transform(line_input(message) if not single_char else _getch(message))
if until(text):
return text | Keeps asking for input (each time in a new line) until a condition
over the input text evaluates to true. Returns the input. This
input can be done using text input or character input. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/console.py#L50-L63 | [
"def input(message, until, single_char=False, transform=lambda a: a):\n",
"def _getch(message):\n print(message, end='')\n return getch()\n",
"def _valid(character):\n if character not in options:\n print(error_message % character)\n",
"return input(\"%s [%s]\" % (message, options), _valid, True, lambda a: a.lower())\n"
] | from __future__ import print_function
from future.builtins.misc import input as line_input
class _Getch:
"""
Gets a single character from standard input. Does not echo to the screen.
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
return msvcrt.getch()
getch = _Getch()
igetch = lambda: getch().lower()
igetch.__doc__ = """
Gets a single character from standard input. Does not echo to the screen.
Converts it to lowercase, so it is not important the actual input case.
"""
def input_option(message, options="yn", error_message=None):
"""
Reads an option from the screen, with a specified prompt.
Keeps asking until a valid option is sent by the user.
"""
def _valid(character):
if character not in options:
print(error_message % character)
return input("%s [%s]" % (message, options), _valid, True, lambda a: a.lower())
|
luismasuelli/python-cantrips | cantrips/console.py | input_option | python | def input_option(message, options="yn", error_message=None):
def _valid(character):
if character not in options:
print(error_message % character)
return input("%s [%s]" % (message, options), _valid, True, lambda a: a.lower()) | Reads an option from the screen, with a specified prompt.
Keeps asking until a valid option is sent by the user. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/console.py#L66-L74 | [
"def input(message, until, single_char=False, transform=lambda a: a):\n \"\"\"\n Keeps asking for input (each time in a new line) until a condition\n over the input text evaluates to true. Returns the input. This\n input can be done using text input or character input.\n \"\"\"\n def _getch(message):\n print(message, end='')\n return getch()\n\n while True:\n text = transform(line_input(message) if not single_char else _getch(message))\n if until(text):\n return text\n"
] | from __future__ import print_function
from future.builtins.misc import input as line_input
class _Getch:
"""
Gets a single character from standard input. Does not echo to the screen.
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
return msvcrt.getch()
getch = _Getch()
igetch = lambda: getch().lower()
igetch.__doc__ = """
Gets a single character from standard input. Does not echo to the screen.
Converts it to lowercase, so it is not important the actual input case.
"""
def input(message, until, single_char=False, transform=lambda a: a):
"""
Keeps asking for input (each time in a new line) until a condition
over the input text evaluates to true. Returns the input. This
input can be done using text input or character input.
"""
def _getch(message):
print(message, end='')
return getch()
while True:
text = transform(line_input(message) if not single_char else _getch(message))
if until(text):
return text
|
luismasuelli/python-cantrips | cantrips/decorators.py | customizable | python | def customizable(subdecorator, **defaults):
@wraps(subdecorator, assigned=('__name__', '__module__'))
def _decorator(*func, **options):
lfunc = len(func)
lopts = len(options)
if lfunc and lopts:
raise TypeError("Cannot specify both positional arguments and options")
if not lfunc and not lopts:
raise TypeError("Either positional arguments or options must be specified")
if lfunc > 1:
raise TypeError("Cannot specify more than one positional argument")
elif lfunc == 1:
# case 1 - the returned function is being used as the direct
# decorator. It must call the subdecorator using the values by
# default AND the wrapped function.
return subdecorator(func[0], **defaults)
else:
# case 2 - the returned function is passed more options and so
# will return the decorator.
return lambda f: subdecorator(f, **dict(defaults, **options))
return _decorator | Allows to create customizable decorators. Such decorators (like Django's django.contrib.auth.login_required)
can be used in two different ways:
# positionally
@decorator
def myfunc(...):
...
and
# namely
@decorator(**opts)
def myfunc(...):
...
Being both of them a different type of call (first-order call, and higher-order call). For this to work, it
needs (as first/subdecorator= argument) a function to be passed which takes (function, option1=v1, option2=v2,
...) arguments. Such function is the actual implementation of the decorator (being the first argument as
positional - the wrapped function - and the remaining arguments just parameters for the implementation). It
also can have many by-name arguments which will act as default arguments for the decorator implementation
(and so, the specified by-name arguments must be expected/well-received by the implementation).
The returned value is a function which can be invoked either positionally (only one argument: the function to wrap)
or by-name (arguments as needed, but expected in the decorator's implementation).
If called positionally, it is a decorator that wraps a function by calling the implementation passing the to-wrap
function AND the default parameters as specified.
If called by-name, its return value is a decorator that wraps a function by calling the implementation passing
the to-wrap function AND the default parameters updated by the by-name parameters.
:param subdecorator: Decorator implementation. Must take a function as positional argument, and many arguments (as
desired) which will be passed by-name and will customize the implementation's behavior.
:param defaults: Default by-name arguments for the implementation (additional reason to have them separate is that
perhaps the user has no control over the function which will be the decorator's implementation).
:return: a function which will act as decorator (if called positionally) or as a function returning a decorator
(if called by-name). | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/decorators.py#L5-L69 | null | from functools import wraps
from future.utils import PY3
try:
from contextlib import ContextDecorator
except ImportError:
class ContextDecorator(object):
"""
A base class that enables a context manager to also be used as a decorator.
"""
def __call__(self, func):
assigned = ('__module__', '__name__', '__doc__')
if not PY3:
assigned = tuple(foo for foo in ('__module__', '__name__', '__doc__') if hasattr(func, foo))
@wraps(func, assigned=assigned)
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
|
luismasuelli/python-cantrips | cantrips/iteration.py | items | python | def items(iterable):
if hasattr(iterable, 'iteritems'):
return (p for p in iterable.iteritems())
elif hasattr(iterable, 'items'):
return (p for p in iterable.items())
else:
return (p for p in enumerate(iterable)) | Iterates over the items of a sequence. If the sequence supports the
dictionary protocol (iteritems/items) then we use that. Otherwise
we use the enumerate built-in function. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/iteration.py#L9-L20 | null | import operator
try:
from itertools import izip
except ImportError:
izip = zip
def iterable(value):
"""
If the value is not iterable, we convert it to an iterable containing
that only value.
:param x:
:return:
"""
try:
return iter(value)
except TypeError:
return value,
try:
from itertools import accumulate
except ImportError:
def accumulate(p, func=operator.add):
"""
Python3's itertools accumulate being ported to PY2
:param p:
:param func:
:return:
"""
iterator = iter(p)
current = next(iterator)
for k in iterator:
yield current
current = func(current, k)
yield current
def labeled_accumulate(sequence, keygetter=operator.itemgetter(0), valuegetter=operator.itemgetter(1), accumulator=operator.add):
"""
Accumulates input elements according to accumulate(), but keeping certain data (per element, from the original
sequence/iterable) in the target elements, like behaving as keys or legends.
:param sequence:
:param keygetter:
:param valuegetter:
:return:
"""
return izip((keygetter(item) for item in sequence),
accumulate((valuegetter(item) for item in sequence), accumulator)) |
luismasuelli/python-cantrips | cantrips/iteration.py | labeled_accumulate | python | def labeled_accumulate(sequence, keygetter=operator.itemgetter(0), valuegetter=operator.itemgetter(1), accumulator=operator.add):
return izip((keygetter(item) for item in sequence),
accumulate((valuegetter(item) for item in sequence), accumulator)) | Accumulates input elements according to accumulate(), but keeping certain data (per element, from the original
sequence/iterable) in the target elements, like behaving as keys or legends.
:param sequence:
:param keygetter:
:param valuegetter:
:return: | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/iteration.py#L54-L64 | [
"def accumulate(p, func=operator.add):\n \"\"\"\n Python3's itertools accumulate being ported to PY2\n :param p:\n :param func:\n :return:\n \"\"\"\n iterator = iter(p)\n current = next(iterator)\n for k in iterator:\n yield current\n current = func(current, k)\n yield current\n"
] | import operator
try:
from itertools import izip
except ImportError:
izip = zip
def items(iterable):
"""
Iterates over the items of a sequence. If the sequence supports the
dictionary protocol (iteritems/items) then we use that. Otherwise
we use the enumerate built-in function.
"""
if hasattr(iterable, 'iteritems'):
return (p for p in iterable.iteritems())
elif hasattr(iterable, 'items'):
return (p for p in iterable.items())
else:
return (p for p in enumerate(iterable))
def iterable(value):
"""
If the value is not iterable, we convert it to an iterable containing
that only value.
:param x:
:return:
"""
try:
return iter(value)
except TypeError:
return value,
try:
from itertools import accumulate
except ImportError:
def accumulate(p, func=operator.add):
"""
Python3's itertools accumulate being ported to PY2
:param p:
:param func:
:return:
"""
iterator = iter(p)
current = next(iterator)
for k in iterator:
yield current
current = func(current, k)
yield current
|
luismasuelli/python-cantrips | cantrips/functions.py | is_method | python | def is_method(method, flags=METHOD_ALL):
if isinstance(method, types.UnboundMethodType):
if flags & METHOD_CLASS and issubclass(method.im_class, type):
return True
if flags & METHOD_INSTANCE and not issubclass(method.im_class, type):
return True
if flags & METHOD_BOUND and method.im_self is not None:
return True
if flags & METHOD_UNBOUND and method.im_self is None:
return True
return False | Determines whether the passed value is a method satisfying certain conditions:
* Being instance method.
* Being class method.
* Being bound method.
* Being unbound method.
Flag check is considered or-wise. The default is to consider every option.
:param method:
:param flags:
:return: | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/functions.py#L11-L32 | null | from __future__ import absolute_import
import types
METHOD_CLASS = 1
METHOD_INSTANCE = 2
METHOD_BOUND = 4
METHOD_UNBOUND = 8
METHOD_ALL = METHOD_CLASS | METHOD_INSTANCE | METHOD_BOUND | METHOD_UNBOUND
def is_static(method):
"""
Determines whether the passed value is a function (NOT a method).
:param method:
:return:
"""
return isinstance(method, types.FunctionType)
is_function = is_static |
luismasuelli/python-cantrips | cantrips/types/exception.py | factory | python | def factory(codes, base=_Exception):
if not issubclass(base, _Exception):
raise FactoryException("Invalid class passed as parent: Must be a subclass of an Exception class created with this function",
FactoryException.INVALID_EXCEPTION_CLASS, intended_parent=base)
class Error(base):
pass
if isinstance(codes, (list, set, tuple, frozenset)):
codes = {e: e for e in codes}
if not isinstance(codes, dict):
raise FactoryException("Factory codes must be a dict str -> object",
FactoryException.INVALID_CODES_LIST, intended_codes=codes)
for code, value in codes.items():
try:
setattr(Error, code, value)
except TypeError:
raise FactoryException("Cannot set class attribute: (%r) -> (%r)" % (code, value),
FactoryException.INVALID_CODE_VALUE, attribute=code, value=value)
return Error | Creates a custom exception class with arbitrary error codes and arguments. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/types/exception.py#L11-L37 | null | from cantrips.types.arguments import Arguments
class _Exception(Arguments, Exception):
def __init__(self, message, code, *args, **kwargs):
Arguments.__init__(self, message=message, code=code, *args, **kwargs)
Exception.__init__(self, message)
FactoryException = factory({'INVALID_EXCEPTION_CLASS': 1,
'INVALID_CODES_LIST': 2,
'INVALID_CODE_VALUE': 3}) |
luismasuelli/python-cantrips | cantrips/features.py | Feature.import_it | python | def import_it(cls):
if not cls in cls._FEATURES:
try:
cls._FEATURES[cls] = cls._import_it()
except ImportError:
raise cls.Error(cls._import_error_message(), cls.Error.UNSATISFIED_IMPORT_REQ)
return cls._FEATURES[cls] | Performs the import only once. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/features.py#L12-L21 | null | class Feature(object):
"""
Tries to import a specific feature.
"""
Error = factory(['UNSATISFIED_IMPORT_REQ'])
_FEATURES = {}
@classmethod
@classmethod
def _import_it(cls):
"""
Internal method - performs the import and returns the imported object.
"""
return None
@classmethod
def _import_error_message(cls):
"""
Internal method - displays the exception message
"""
return None |
luismasuelli/python-cantrips | cantrips/entropy.py | weighted_random | python | def weighted_random(sequence):
if isinstance(sequence, dict):
sequence = sequence.items()
accumulated = list(labeled_accumulate(sequence))
r = random.random() * accumulated[-1][1]
for k, v in accumulated:
if r < v:
return k
#punto inalcanzable a priori
return None | Given a sequence of pairs (element, weight) where weight is an addable/total-order-comparable (e.g. a number),
it returns a random element (first item in each pair) given in a non-uniform way given by the weight of the
element (second item in each pair)
:param sequence: sequence/iterator of pairs (element, weight)
:return: any value in the first element of each pair | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/entropy.py#L20-L38 | [
"def labeled_accumulate(sequence, keygetter=operator.itemgetter(0), valuegetter=operator.itemgetter(1), accumulator=operator.add):\n \"\"\"\n Accumulates input elements according to accumulate(), but keeping certain data (per element, from the original\n sequence/iterable) in the target elements, like behaving as keys or legends.\n :param sequence:\n :param keygetter:\n :param valuegetter:\n :return:\n \"\"\"\n return izip((keygetter(item) for item in sequence),\n accumulate((valuegetter(item) for item in sequence), accumulator))"
] | import datetime
from cantrips.iteration import labeled_accumulate
import random
from hashlib import sha1, sha224, sha256, sha384, sha512
from base64 import b64encode
_HASHES = {
'sha1': sha1,
'sha224': sha224,
'sha256': sha256,
'sha384': sha384,
'sha512': sha512,
}
random.seed()
def nonce(algorithm='sha1', to_hex=True):
"""
Generates a nonce (a pseudo-random token). It is seeded with the current date/time.
:param algorithm: a string being any of the SHA hash algorithms.
:param to_hex: a boolean describing whether we want a base64 digest or a hexadecimal digest
:return:
"""
if algorithm not in _HASHES:
return None
result = _HASHES[algorithm](datetime.datetime.now().isoformat())
return result.hexdigest() if to_hex else b64encode(result.digest())
|
luismasuelli/python-cantrips | cantrips/entropy.py | nonce | python | def nonce(algorithm='sha1', to_hex=True):
if algorithm not in _HASHES:
return None
result = _HASHES[algorithm](datetime.datetime.now().isoformat())
return result.hexdigest() if to_hex else b64encode(result.digest()) | Generates a nonce (a pseudo-random token). It is seeded with the current date/time.
:param algorithm: a string being any of the SHA hash algorithms.
:param to_hex: a boolean describing whether we want a base64 digest or a hexadecimal digest
:return: | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/entropy.py#L41-L51 | null | import datetime
from cantrips.iteration import labeled_accumulate
import random
from hashlib import sha1, sha224, sha256, sha384, sha512
from base64 import b64encode
_HASHES = {
'sha1': sha1,
'sha224': sha224,
'sha256': sha256,
'sha384': sha384,
'sha512': sha512,
}
random.seed()
def weighted_random(sequence):
"""
Given a sequence of pairs (element, weight) where weight is an addable/total-order-comparable (e.g. a number),
it returns a random element (first item in each pair) given in a non-uniform way given by the weight of the
element (second item in each pair)
:param sequence: sequence/iterator of pairs (element, weight)
:return: any value in the first element of each pair
"""
if isinstance(sequence, dict):
sequence = sequence.items()
accumulated = list(labeled_accumulate(sequence))
r = random.random() * accumulated[-1][1]
for k, v in accumulated:
if r < v:
return k
#punto inalcanzable a priori
return None
|
luismasuelli/python-cantrips | cantrips/patterns/actions.py | Action.as_method | python | def as_method(self, docstring=""):
method = lambda obj, *args, **kwargs: self(obj, *args, **kwargs)
if docstring:
method.__doc__ = docstring
return method | Converts this action to a function or method.
An optional docstring may be passed. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/actions.py#L12-L20 | null | class Action(object):
"""
This class can define a custom behavior, and then be exposed as a
method. It doesn't matter if such method is instance or class method.
This is intended to become a sort of "configurable method".
"""
def __call__(self, obj, *args, **kwargs):
raise NotImplementedError
|
luismasuelli/python-cantrips | cantrips/patterns/broadcast.py | IBroadcast.BROADCAST_FILTER_OTHERS | python | def BROADCAST_FILTER_OTHERS(user):
if not isinstance(user, (set,frozenset,list,tuple)):
user = (user,)
return lambda u, command, *args, **kwargs: u not in user | HIGH-ORDER (pass it as IBroadcast.BROADCAST_FILTER_OTHERS(user-or-sequence))
Criteria to broadcast to every user but the current(s). | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/broadcast.py#L58-L65 | null | class IBroadcast(INotifier, IRegistrar):
"""
Offers behavior to notify each user.
"""
@staticmethod
def BROADCAST_FILTER_ALL(user, command, *args, **kwargs):
"""
FIRST-ORDER (pass it as IBroadcast.BROADCAST_FILTER_ALL)
Criteria to broadcast to every user
"""
return True
@staticmethod
@staticmethod
def BROADCAST_FILTER_AND(*funcs):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: all(f(u, command, *args, **kwargs) for f in funcs)
@staticmethod
def BROADCAST_FILTER_OR(*funcs):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: any(f(u, command, *args, **kwargs) for f in funcs)
@staticmethod
def BROADCAST_FILTER_NOT(func):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: not func(u, command, *args, **kwargs)
def broadcast(self, command, *args, **kwargs):
"""
Notifies each user with a specified command.
"""
criterion = kwargs.pop('criterion', self.BROADCAST_FILTER_ALL)
for index, user in items(self.users()):
if criterion(user, command, *args, **kwargs):
self.notify(user, command, *args, **kwargs) |
luismasuelli/python-cantrips | cantrips/patterns/broadcast.py | IBroadcast.BROADCAST_FILTER_AND | python | def BROADCAST_FILTER_AND(*funcs):
return lambda u, command, *args, **kwargs: all(f(u, command, *args, **kwargs) for f in funcs) | Composes the passed filters into an and-joined filter. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/broadcast.py#L68-L72 | null | class IBroadcast(INotifier, IRegistrar):
"""
Offers behavior to notify each user.
"""
@staticmethod
def BROADCAST_FILTER_ALL(user, command, *args, **kwargs):
"""
FIRST-ORDER (pass it as IBroadcast.BROADCAST_FILTER_ALL)
Criteria to broadcast to every user
"""
return True
@staticmethod
def BROADCAST_FILTER_OTHERS(user):
"""
HIGH-ORDER (pass it as IBroadcast.BROADCAST_FILTER_OTHERS(user-or-sequence))
Criteria to broadcast to every user but the current(s).
"""
if not isinstance(user, (set,frozenset,list,tuple)):
user = (user,)
return lambda u, command, *args, **kwargs: u not in user
@staticmethod
@staticmethod
def BROADCAST_FILTER_OR(*funcs):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: any(f(u, command, *args, **kwargs) for f in funcs)
@staticmethod
def BROADCAST_FILTER_NOT(func):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: not func(u, command, *args, **kwargs)
def broadcast(self, command, *args, **kwargs):
"""
Notifies each user with a specified command.
"""
criterion = kwargs.pop('criterion', self.BROADCAST_FILTER_ALL)
for index, user in items(self.users()):
if criterion(user, command, *args, **kwargs):
self.notify(user, command, *args, **kwargs) |
luismasuelli/python-cantrips | cantrips/patterns/broadcast.py | IBroadcast.BROADCAST_FILTER_OR | python | def BROADCAST_FILTER_OR(*funcs):
return lambda u, command, *args, **kwargs: any(f(u, command, *args, **kwargs) for f in funcs) | Composes the passed filters into an and-joined filter. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/broadcast.py#L75-L79 | null | class IBroadcast(INotifier, IRegistrar):
"""
Offers behavior to notify each user.
"""
@staticmethod
def BROADCAST_FILTER_ALL(user, command, *args, **kwargs):
"""
FIRST-ORDER (pass it as IBroadcast.BROADCAST_FILTER_ALL)
Criteria to broadcast to every user
"""
return True
@staticmethod
def BROADCAST_FILTER_OTHERS(user):
"""
HIGH-ORDER (pass it as IBroadcast.BROADCAST_FILTER_OTHERS(user-or-sequence))
Criteria to broadcast to every user but the current(s).
"""
if not isinstance(user, (set,frozenset,list,tuple)):
user = (user,)
return lambda u, command, *args, **kwargs: u not in user
@staticmethod
def BROADCAST_FILTER_AND(*funcs):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: all(f(u, command, *args, **kwargs) for f in funcs)
@staticmethod
@staticmethod
def BROADCAST_FILTER_NOT(func):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: not func(u, command, *args, **kwargs)
def broadcast(self, command, *args, **kwargs):
"""
Notifies each user with a specified command.
"""
criterion = kwargs.pop('criterion', self.BROADCAST_FILTER_ALL)
for index, user in items(self.users()):
if criterion(user, command, *args, **kwargs):
self.notify(user, command, *args, **kwargs) |
luismasuelli/python-cantrips | cantrips/patterns/broadcast.py | IBroadcast.BROADCAST_FILTER_NOT | python | def BROADCAST_FILTER_NOT(func):
return lambda u, command, *args, **kwargs: not func(u, command, *args, **kwargs) | Composes the passed filters into an and-joined filter. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/broadcast.py#L82-L86 | null | class IBroadcast(INotifier, IRegistrar):
"""
Offers behavior to notify each user.
"""
@staticmethod
def BROADCAST_FILTER_ALL(user, command, *args, **kwargs):
"""
FIRST-ORDER (pass it as IBroadcast.BROADCAST_FILTER_ALL)
Criteria to broadcast to every user
"""
return True
@staticmethod
def BROADCAST_FILTER_OTHERS(user):
"""
HIGH-ORDER (pass it as IBroadcast.BROADCAST_FILTER_OTHERS(user-or-sequence))
Criteria to broadcast to every user but the current(s).
"""
if not isinstance(user, (set,frozenset,list,tuple)):
user = (user,)
return lambda u, command, *args, **kwargs: u not in user
@staticmethod
def BROADCAST_FILTER_AND(*funcs):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: all(f(u, command, *args, **kwargs) for f in funcs)
@staticmethod
def BROADCAST_FILTER_OR(*funcs):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: any(f(u, command, *args, **kwargs) for f in funcs)
@staticmethod
def broadcast(self, command, *args, **kwargs):
"""
Notifies each user with a specified command.
"""
criterion = kwargs.pop('criterion', self.BROADCAST_FILTER_ALL)
for index, user in items(self.users()):
if criterion(user, command, *args, **kwargs):
self.notify(user, command, *args, **kwargs) |
luismasuelli/python-cantrips | cantrips/patterns/broadcast.py | IBroadcast.broadcast | python | def broadcast(self, command, *args, **kwargs):
criterion = kwargs.pop('criterion', self.BROADCAST_FILTER_ALL)
for index, user in items(self.users()):
if criterion(user, command, *args, **kwargs):
self.notify(user, command, *args, **kwargs) | Notifies each user with a specified command. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/broadcast.py#L88-L95 | [
"def items(iterable):\n \"\"\"\n Iterates over the items of a sequence. If the sequence supports the\n dictionary protocol (iteritems/items) then we use that. Otherwise\n we use the enumerate built-in function.\n \"\"\"\n if hasattr(iterable, 'iteritems'):\n return (p for p in iterable.iteritems())\n elif hasattr(iterable, 'items'):\n return (p for p in iterable.items())\n else:\n return (p for p in enumerate(iterable))\n",
"def notify(self, user, command, *args, **kwargs):\n \"\"\"\n Notifies a user with a specified command/data.\n \"\"\"\n\n raise NotImplementedError\n",
"def users(self):\n \"\"\"\n Gets the list of users.\n \"\"\"\n\n raise NotImplementedError\n"
] | class IBroadcast(INotifier, IRegistrar):
"""
Offers behavior to notify each user.
"""
@staticmethod
def BROADCAST_FILTER_ALL(user, command, *args, **kwargs):
"""
FIRST-ORDER (pass it as IBroadcast.BROADCAST_FILTER_ALL)
Criteria to broadcast to every user
"""
return True
@staticmethod
def BROADCAST_FILTER_OTHERS(user):
"""
HIGH-ORDER (pass it as IBroadcast.BROADCAST_FILTER_OTHERS(user-or-sequence))
Criteria to broadcast to every user but the current(s).
"""
if not isinstance(user, (set,frozenset,list,tuple)):
user = (user,)
return lambda u, command, *args, **kwargs: u not in user
@staticmethod
def BROADCAST_FILTER_AND(*funcs):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: all(f(u, command, *args, **kwargs) for f in funcs)
@staticmethod
def BROADCAST_FILTER_OR(*funcs):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: any(f(u, command, *args, **kwargs) for f in funcs)
@staticmethod
def BROADCAST_FILTER_NOT(func):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: not func(u, command, *args, **kwargs)
|
luismasuelli/python-cantrips | cantrips/patterns/identify.py | List.create | python | def create(self, key, *args, **kwargs):
instance = self._class(key, *args, **kwargs)
self._events.create.trigger(list=self, instance=instance, key=key, args=args, kwargs=kwargs)
return self.insert(instance) | Creates and inserts an identified object with the passed params
using the specified class. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/identify.py#L53-L60 | [
"def insert(self, identified):\n \"\"\"\n Inserts an already-created identified object of the expected class.\n \"\"\"\n\n if not isinstance(identified, self._class):\n raise self.Error(\"Passed instance is not of the needed class\",\n self.Error.INVALID_INSTANCE_CLASS, instance=identified)\n\n try:\n if self._objects[identified.key] != identified:\n raise self.Error(\"Passes instance's key '%s' is already occupied\" % identified.key,\n self.Error.KEY_EXISTS, key=identified.key, instance=identified)\n except KeyError:\n self._objects[identified.key] = identified\n self._events.insert.trigger(list=self, instance=identified)\n return identified\n"
] | class List(object):
"""
Keeps a list of identified objects by key. A key cannot be registered twice.
It can track when objects are created, inserted, and removed from it.
"""
Error = factory(['INVALID_CLASS', 'INVALID_INSTANCE_CLASS', 'KEY_EXISTS', 'KEY_NOT_EXISTS', 'NOT_SAME_OBJECT'])
def __init__(self, element_class=Identified):
if not issubclass(element_class, Identified):
raise self.Error("class '%s' is not a valid Identified subclass" % element_class.__name__,
self.Error.INVALID_CLASS, element_class=element_class)
self._class = element_class
self._objects = {}
self._events = Eventful(('create', 'insert', 'remove'))
@property
def events(self):
"""
Returns the available events for the current list.
Events are limited to:
create: when an instance is created (but not yet inserted)
by the `create(key, *args, **kwargs)` method.
Triggered passing the current list, the instance,
the key, and the remaining arguments.
insert: when an instance is inserted by the
`insert(instance)` method.
Triggered passing the current list, and the instance.
remove: when an instance is removed by the
`remove(instance|key)` method.
Triggered passing the current list, the instance, and
indicating whether it was by value or not (not=it was
by key).
"""
return self._events
def insert(self, identified):
"""
Inserts an already-created identified object of the expected class.
"""
if not isinstance(identified, self._class):
raise self.Error("Passed instance is not of the needed class",
self.Error.INVALID_INSTANCE_CLASS, instance=identified)
try:
if self._objects[identified.key] != identified:
raise self.Error("Passes instance's key '%s' is already occupied" % identified.key,
self.Error.KEY_EXISTS, key=identified.key, instance=identified)
except KeyError:
self._objects[identified.key] = identified
self._events.insert.trigger(list=self, instance=identified)
return identified
def remove(self, identified):
"""
Removes an already-created identified object.
A key may be passed instead of an identified object.
If an object is passed, and its key is held by another
object inside the record, an error is triggered.
Returns the removed object.
"""
by_val = isinstance(identified, Identified)
if by_val:
key = identified.key
if not isinstance(identified, self._class):
raise self.Error("Such instance could never exist here",
self.Error.INVALID_INSTANCE_CLASS, instance=identified)
else:
key = identified
try:
popped = self._objects.pop(key)
if by_val and popped != identified:
raise self.Error("Trying to pop a different object which also has key '%s'" % popped.key,
self.Error.NOT_SAME_OBJECT, instance=identified, current=popped)
self._events.remove.trigger(list=self, instance=identified, by_val=by_val)
except KeyError:
raise self.Error("No object with key '%s' exists here",
self.Error.KEY_NOT_EXISTS, key=key, instance=identified if by_val else None)
def items(self):
"""
Gives a list -or iterator, in PY3- of inner items
"""
return self._objects.items()
if not PY3:
def iteritems(self):
"""
Gives an interator of inner items. This method is
only available in Python 2.x
"""
return self._objects.iteritems()
def __iter__(self):
"""
You can iterate over contained objects.
"""
return iter(self._objects)
def __len__(self):
"""
length of the list, in items.
"""
return len(self._objects)
def __getitem__(self, item):
"""
Returns a registered item by key (or the item itself, if it is an existent instance).
"""
if isinstance(item, Identified):
obj = self._objects[item.key]
if obj is not item:
raise KeyError(item)
return obj
else:
return self._objects[item]
def __contains__(self, item):
"""
Determines whether an identified object OR a key is registered.
"""
if isinstance(item, Identified):
if not isinstance(item, self._class):
return False
return item.key in self._objects and self._objects[item.key] is item
else:
return item in self._objects |
luismasuelli/python-cantrips | cantrips/patterns/identify.py | List.insert | python | def insert(self, identified):
if not isinstance(identified, self._class):
raise self.Error("Passed instance is not of the needed class",
self.Error.INVALID_INSTANCE_CLASS, instance=identified)
try:
if self._objects[identified.key] != identified:
raise self.Error("Passes instance's key '%s' is already occupied" % identified.key,
self.Error.KEY_EXISTS, key=identified.key, instance=identified)
except KeyError:
self._objects[identified.key] = identified
self._events.insert.trigger(list=self, instance=identified)
return identified | Inserts an already-created identified object of the expected class. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/identify.py#L62-L78 | null | class List(object):
"""
Keeps a list of identified objects by key. A key cannot be registered twice.
It can track when objects are created, inserted, and removed from it.
"""
Error = factory(['INVALID_CLASS', 'INVALID_INSTANCE_CLASS', 'KEY_EXISTS', 'KEY_NOT_EXISTS', 'NOT_SAME_OBJECT'])
def __init__(self, element_class=Identified):
if not issubclass(element_class, Identified):
raise self.Error("class '%s' is not a valid Identified subclass" % element_class.__name__,
self.Error.INVALID_CLASS, element_class=element_class)
self._class = element_class
self._objects = {}
self._events = Eventful(('create', 'insert', 'remove'))
@property
def events(self):
"""
Returns the available events for the current list.
Events are limited to:
create: when an instance is created (but not yet inserted)
by the `create(key, *args, **kwargs)` method.
Triggered passing the current list, the instance,
the key, and the remaining arguments.
insert: when an instance is inserted by the
`insert(instance)` method.
Triggered passing the current list, and the instance.
remove: when an instance is removed by the
`remove(instance|key)` method.
Triggered passing the current list, the instance, and
indicating whether it was by value or not (not=it was
by key).
"""
return self._events
def create(self, key, *args, **kwargs):
"""
Creates and inserts an identified object with the passed params
using the specified class.
"""
instance = self._class(key, *args, **kwargs)
self._events.create.trigger(list=self, instance=instance, key=key, args=args, kwargs=kwargs)
return self.insert(instance)
def remove(self, identified):
"""
Removes an already-created identified object.
A key may be passed instead of an identified object.
If an object is passed, and its key is held by another
object inside the record, an error is triggered.
Returns the removed object.
"""
by_val = isinstance(identified, Identified)
if by_val:
key = identified.key
if not isinstance(identified, self._class):
raise self.Error("Such instance could never exist here",
self.Error.INVALID_INSTANCE_CLASS, instance=identified)
else:
key = identified
try:
popped = self._objects.pop(key)
if by_val and popped != identified:
raise self.Error("Trying to pop a different object which also has key '%s'" % popped.key,
self.Error.NOT_SAME_OBJECT, instance=identified, current=popped)
self._events.remove.trigger(list=self, instance=identified, by_val=by_val)
except KeyError:
raise self.Error("No object with key '%s' exists here",
self.Error.KEY_NOT_EXISTS, key=key, instance=identified if by_val else None)
def items(self):
"""
Gives a list -or iterator, in PY3- of inner items
"""
return self._objects.items()
if not PY3:
def iteritems(self):
"""
Gives an interator of inner items. This method is
only available in Python 2.x
"""
return self._objects.iteritems()
def __iter__(self):
"""
You can iterate over contained objects.
"""
return iter(self._objects)
def __len__(self):
"""
length of the list, in items.
"""
return len(self._objects)
def __getitem__(self, item):
"""
Returns a registered item by key (or the item itself, if it is an existent instance).
"""
if isinstance(item, Identified):
obj = self._objects[item.key]
if obj is not item:
raise KeyError(item)
return obj
else:
return self._objects[item]
def __contains__(self, item):
"""
Determines whether an identified object OR a key is registered.
"""
if isinstance(item, Identified):
if not isinstance(item, self._class):
return False
return item.key in self._objects and self._objects[item.key] is item
else:
return item in self._objects |
luismasuelli/python-cantrips | cantrips/patterns/identify.py | List.remove | python | def remove(self, identified):
by_val = isinstance(identified, Identified)
if by_val:
key = identified.key
if not isinstance(identified, self._class):
raise self.Error("Such instance could never exist here",
self.Error.INVALID_INSTANCE_CLASS, instance=identified)
else:
key = identified
try:
popped = self._objects.pop(key)
if by_val and popped != identified:
raise self.Error("Trying to pop a different object which also has key '%s'" % popped.key,
self.Error.NOT_SAME_OBJECT, instance=identified, current=popped)
self._events.remove.trigger(list=self, instance=identified, by_val=by_val)
except KeyError:
raise self.Error("No object with key '%s' exists here",
self.Error.KEY_NOT_EXISTS, key=key, instance=identified if by_val else None) | Removes an already-created identified object.
A key may be passed instead of an identified object.
If an object is passed, and its key is held by another
object inside the record, an error is triggered.
Returns the removed object. | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/identify.py#L80-L106 | null | class List(object):
"""
Keeps a list of identified objects by key. A key cannot be registered twice.
It can track when objects are created, inserted, and removed from it.
"""
Error = factory(['INVALID_CLASS', 'INVALID_INSTANCE_CLASS', 'KEY_EXISTS', 'KEY_NOT_EXISTS', 'NOT_SAME_OBJECT'])
def __init__(self, element_class=Identified):
if not issubclass(element_class, Identified):
raise self.Error("class '%s' is not a valid Identified subclass" % element_class.__name__,
self.Error.INVALID_CLASS, element_class=element_class)
self._class = element_class
self._objects = {}
self._events = Eventful(('create', 'insert', 'remove'))
@property
def events(self):
"""
Returns the available events for the current list.
Events are limited to:
create: when an instance is created (but not yet inserted)
by the `create(key, *args, **kwargs)` method.
Triggered passing the current list, the instance,
the key, and the remaining arguments.
insert: when an instance is inserted by the
`insert(instance)` method.
Triggered passing the current list, and the instance.
remove: when an instance is removed by the
`remove(instance|key)` method.
Triggered passing the current list, the instance, and
indicating whether it was by value or not (not=it was
by key).
"""
return self._events
def create(self, key, *args, **kwargs):
"""
Creates and inserts an identified object with the passed params
using the specified class.
"""
instance = self._class(key, *args, **kwargs)
self._events.create.trigger(list=self, instance=instance, key=key, args=args, kwargs=kwargs)
return self.insert(instance)
def insert(self, identified):
"""
Inserts an already-created identified object of the expected class.
"""
if not isinstance(identified, self._class):
raise self.Error("Passed instance is not of the needed class",
self.Error.INVALID_INSTANCE_CLASS, instance=identified)
try:
if self._objects[identified.key] != identified:
raise self.Error("Passes instance's key '%s' is already occupied" % identified.key,
self.Error.KEY_EXISTS, key=identified.key, instance=identified)
except KeyError:
self._objects[identified.key] = identified
self._events.insert.trigger(list=self, instance=identified)
return identified
def items(self):
"""
Gives a list -or iterator, in PY3- of inner items
"""
return self._objects.items()
if not PY3:
def iteritems(self):
"""
Gives an interator of inner items. This method is
only available in Python 2.x
"""
return self._objects.iteritems()
def __iter__(self):
"""
You can iterate over contained objects.
"""
return iter(self._objects)
def __len__(self):
"""
length of the list, in items.
"""
return len(self._objects)
def __getitem__(self, item):
"""
Returns a registered item by key (or the item itself, if it is an existent instance).
"""
if isinstance(item, Identified):
obj = self._objects[item.key]
if obj is not item:
raise KeyError(item)
return obj
else:
return self._objects[item]
def __contains__(self, item):
"""
Determines whether an identified object OR a key is registered.
"""
if isinstance(item, Identified):
if not isinstance(item, self._class):
return False
return item.key in self._objects and self._objects[item.key] is item
else:
return item in self._objects |
luismasuelli/python-cantrips | cantrips/types/record.py | TrackableRecord.track_end | python | def track_end(self):
self.__tracking = False
changes = self.__changes
self.__changes = {}
return changes | Ends tracking of attributes changes.
Returns the changes that occurred to the attributes.
Only the final state of each attribute is obtained | train | https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/types/record.py#L33-L42 | null | class TrackableRecord(object):
"""
Can hold a set of changeable attributes. Attributes not initialized beforehand are NOT settable.
Can also track the changes of the attributes.
"""
def __init__(self, **kwargs):
self.__dict = kwargs
self.__tracking = False
self.__changes = {}
def __setattr__(self, key, value):
"""
Allows to edit attribute only belonging to the
initial attributes (it is like a per-object
__slots__), and tracks the changes of the
current instance's attributes if tracking
is enabled.
"""
if not key in self.__dict:
raise AttributeError("'%s' object has no attribute '%s'" % (type(self).__name__, key))
old = self.__dict[key]
self.__dict[key] = value
if self.__tracking:
self.__changes.setdefault(key, {'old': old}).update({'new': value})
def track_start(self):
"""
Begins tracking of attributes changes.
"""
self.__tracking = True
|
vilmibm/done | sql_interp/sql_interp.py | SQLInterp.interp | python | def interp(self, *args):
sql = ""
bind = ()
def _append_sql(sql, part):
"Handle whitespace when appending properly."
if len(sql) == 0:
return part
elif sql[-1] == ' ':
return sql + part
else:
return sql + ' ' + part
for arg in args:
if type(arg) is str:
# Strings are treated as raw SQL.
sql = _append_sql(sql, arg)
elif isinstance(arg, Esc):
# If this is an instance of Esc, ask the object
# how to represent the data given the context.
arg_sql, arg_bind = arg.to_string(sql)
sql = _append_sql(sql, arg_sql)
bind += arg_bind
else:
# Any argument given that is not a string or Esc
# is an error.
arg_sql, arg_bind = self.esc(arg).to_string(sql)
sql = _append_sql(sql, arg_sql)
bind += arg_bind
return (sql, bind) | This method takes a list of SQL snippets and returns a SQL statement and
a list of bind variables to be passed to the DB API's execute method. | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/sql_interp/sql_interp.py#L16-L50 | [
"def esc(self, val):\n \"\"\"\n Returns the given object in the appropriate wrapper class from esc_types.py.\n\n In most cases, you will not need to call this directly. However, if you are\n passing a string to the interp method that should be used as an SQL bind value\n and not raw SQL, you must pass it to this method to avoid a SQL injection\n vulnerability. For example:\n\n >>> sqli = SQLInterp()\n >>> first_name = 'John'\n\n The following is wrong! This could lead to a SQL injection attack.\n\n >>> sqli.interp(\"SELECT * FROM table WHERE first_name =\", first_name)\n ('SELECT * FROM table WHERE first_name = John', ())\n\n This is the correct way.\n\n >>> sqli.interp(\"SELECT * FROM table WHERE first_name =\", sqli.esc(first_name))\n ('SELECT * FROM table WHERE first_name = ?', ('John',))\n \"\"\"\n if type(val) in self.type_map:\n return self.type_map[type(val)](val)\n else:\n return Esc(val)\n",
"def _append_sql(sql, part):\n \"Handle whitespace when appending properly.\"\n if len(sql) == 0:\n return part\n elif sql[-1] == ' ':\n return sql + part\n else:\n return sql + ' ' + part\n"
] | class SQLInterp(object):
"""
The main sql_interp object.
"""
def __init__(self, new_types=None):
self.type_map = {
list: ListEsc,
tuple: ListEsc,
dict: DictEsc,
}
if new_types:
self.type_map.update(new_types)
def esc(self, val):
"""
Returns the given object in the appropriate wrapper class from esc_types.py.
In most cases, you will not need to call this directly. However, if you are
passing a string to the interp method that should be used as an SQL bind value
and not raw SQL, you must pass it to this method to avoid a SQL injection
vulnerability. For example:
>>> sqli = SQLInterp()
>>> first_name = 'John'
The following is wrong! This could lead to a SQL injection attack.
>>> sqli.interp("SELECT * FROM table WHERE first_name =", first_name)
('SELECT * FROM table WHERE first_name = John', ())
This is the correct way.
>>> sqli.interp("SELECT * FROM table WHERE first_name =", sqli.esc(first_name))
('SELECT * FROM table WHERE first_name = ?', ('John',))
"""
if type(val) in self.type_map:
return self.type_map[type(val)](val)
else:
return Esc(val)
def add_types(self, new_types):
"""
Add new custom types that can be interpolated by this object.
This method expects a dict that maps types (the keys) to their custom wrapper
classes (the values). The wrapper classes must be a descendant of the Esc class.
"""
self.type_map.update(new_types)
|
vilmibm/done | sql_interp/sql_interp.py | SQLInterp.esc | python | def esc(self, val):
if type(val) in self.type_map:
return self.type_map[type(val)](val)
else:
return Esc(val) | Returns the given object in the appropriate wrapper class from esc_types.py.
In most cases, you will not need to call this directly. However, if you are
passing a string to the interp method that should be used as an SQL bind value
and not raw SQL, you must pass it to this method to avoid a SQL injection
vulnerability. For example:
>>> sqli = SQLInterp()
>>> first_name = 'John'
The following is wrong! This could lead to a SQL injection attack.
>>> sqli.interp("SELECT * FROM table WHERE first_name =", first_name)
('SELECT * FROM table WHERE first_name = John', ())
This is the correct way.
>>> sqli.interp("SELECT * FROM table WHERE first_name =", sqli.esc(first_name))
('SELECT * FROM table WHERE first_name = ?', ('John',)) | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/sql_interp/sql_interp.py#L52-L77 | null | class SQLInterp(object):
"""
The main sql_interp object.
"""
def __init__(self, new_types=None):
self.type_map = {
list: ListEsc,
tuple: ListEsc,
dict: DictEsc,
}
if new_types:
self.type_map.update(new_types)
def interp(self, *args):
"""
This method takes a list of SQL snippets and returns a SQL statement and
a list of bind variables to be passed to the DB API's execute method.
"""
sql = ""
bind = ()
def _append_sql(sql, part):
"Handle whitespace when appending properly."
if len(sql) == 0:
return part
elif sql[-1] == ' ':
return sql + part
else:
return sql + ' ' + part
for arg in args:
if type(arg) is str:
# Strings are treated as raw SQL.
sql = _append_sql(sql, arg)
elif isinstance(arg, Esc):
# If this is an instance of Esc, ask the object
# how to represent the data given the context.
arg_sql, arg_bind = arg.to_string(sql)
sql = _append_sql(sql, arg_sql)
bind += arg_bind
else:
# Any argument given that is not a string or Esc
# is an error.
arg_sql, arg_bind = self.esc(arg).to_string(sql)
sql = _append_sql(sql, arg_sql)
bind += arg_bind
return (sql, bind)
def add_types(self, new_types):
"""
Add new custom types that can be interpolated by this object.
This method expects a dict that maps types (the keys) to their custom wrapper
classes (the values). The wrapper classes must be a descendant of the Esc class.
"""
self.type_map.update(new_types)
|
vilmibm/done | done/Commands.py | run | python | def run(command, options, args):
if command == "backend":
subprocess.call(("sqlite3", db_path))
if command == "add":
dp = pdt.Calendar()
due = mktime(dp.parse(options.due)[0]) if options.due else None
print "added tasks..."
[Task(desc, due).add() for desc in args]
return
filters = args if len(args) else None
rows = Query(filters, options).find()
tasks = [Task(r["desc"], r["due"]) for r in rows]
if command == "list":
for t in tasks:
print "\t *", t
if command == "done":
print "done with..."
finished_tasks = []
for t in tasks:
finished = t.done()
if finished:
finished_tasks.append(t)
if not finished_tasks:
return
print ""
print "finished tasks:"
for t in finished_tasks:
print "\t X", t
if command == "remove":
print "remove..."
removed_tasks = []
for t in tasks:
removed = t.remove()
if removed:
removed_tasks.append(t)
if not removed_tasks:
return
print ""
print "removed tasks:"
for t in removed_tasks:
print "\t RM", t | Run the requested command. args is either a list of descriptions or a list of strings to filter by | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/done/Commands.py#L25-L76 | [
"def find(self):\n criteria = {\"done\": 0}\n if self.finished:\n criteria[\"done\"] = 1\n if self.due:\n criteria[\"due\"] = mktime(self.dp.parse(self.due)[0])\n\n interped = self.si.interp(\"SELECT desc, due FROM tasks WHERE\", criteria, \"ORDER BY\", self.sort, \"DESC\")\n\n self.c.execute(interped[0], interped[1])\n\n rows = []\n for row in self.c:\n rows.append({\"desc\":row[0], \"due\":row[1]})\n\n if not self.filters:\n return rows\n\n filtset = set(self.filters)\n\n matches = filter(lambda r: set(r[\"desc\"].split()) >= filtset, rows)\n\n return matches\n",
"def parse(self, datetimeString, sourceTime=None):\n \"\"\"\n Splits the given C{datetimeString} into tokens, finds the regex\n patterns that match and then calculates a C{struct_time} value from\n the chunks.\n\n If C{sourceTime} is given then the C{struct_time} value will be\n calculated from that value, otherwise from the current date/time.\n\n If the C{datetimeString} is parsed and date/time value found then\n the second item of the returned tuple will be a flag to let you know\n what kind of C{struct_time} value is being returned::\n\n 0 = not parsed at all\n 1 = parsed as a C{date}\n 2 = parsed as a C{time}\n 3 = parsed as a C{datetime}\n\n @type datetimeString: string\n @param datetimeString: date/time text to evaluate\n @type sourceTime: struct_time\n @param sourceTime: C{struct_time} value to use as the base\n\n @rtype: tuple\n @return: tuple of: modified C{sourceTime} and the result flag\n \"\"\"\n\n if sourceTime:\n if isinstance(sourceTime, datetime.datetime):\n if _debug:\n print 'coercing datetime to timetuple'\n sourceTime = sourceTime.timetuple()\n else:\n if not isinstance(sourceTime, time.struct_time) and \\\n not isinstance(sourceTime, tuple):\n raise Exception('sourceTime is not a struct_time')\n\n s = datetimeString.strip().lower()\n parseStr = ''\n totalTime = sourceTime\n\n if s == '' :\n if sourceTime is not None:\n return (sourceTime, self.dateFlag + self.timeFlag)\n else:\n return (time.localtime(), 0)\n\n self.timeFlag = 0\n self.dateFlag = 0\n\n while len(s) > 0:\n flag = False\n chunk1 = ''\n chunk2 = ''\n\n if _debug:\n print 'parse (top of loop): [%s][%s]' % (s, parseStr)\n\n if parseStr == '':\n # Modifier like next\\prev..\n m = self.ptc.CRE_MODIFIER.search(s)\n if m is not None:\n self.modifierFlag = True\n if (m.group('modifier') != s):\n # capture remaining string\n parseStr = m.group('modifier')\n chunk1 = s[:m.start('modifier')].strip()\n chunk2 = s[m.end('modifier'):].strip()\n flag = True\n else:\n parseStr = s\n\n if 
parseStr == '':\n # Modifier like from\\after\\prior..\n m = self.ptc.CRE_MODIFIER2.search(s)\n if m is not None:\n self.modifier2Flag = True\n if (m.group('modifier') != s):\n # capture remaining string\n parseStr = m.group('modifier')\n chunk1 = s[:m.start('modifier')].strip()\n chunk2 = s[m.end('modifier'):].strip()\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n valid_date = False\n for match in self.ptc.CRE_DATE3.finditer(s):\n # to prevent \"HH:MM(:SS) time strings\" expressions from triggering\n # this regex, we checks if the month field exists in the searched \n # expression, if it doesn't exist, the date field is not valid\n if match.group('mthname'):\n m = self.ptc.CRE_DATE3.search(s, match.start())\n valid_date = True\n break\n\n # String date format\n if valid_date:\n self.dateStrFlag = True\n self.dateFlag = 1\n if (m.group('date') != s):\n # capture remaining string\n parseStr = m.group('date')\n chunk1 = s[:m.start('date')]\n chunk2 = s[m.end('date'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Standard date format\n m = self.ptc.CRE_DATE.search(s)\n if m is not None:\n self.dateStdFlag = True\n self.dateFlag = 1\n if (m.group('date') != s):\n # capture remaining string\n parseStr = m.group('date')\n chunk1 = s[:m.start('date')]\n chunk2 = s[m.end('date'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Natural language day strings\n m = self.ptc.CRE_DAY.search(s)\n if m is not None:\n self.dayStrFlag = True\n self.dateFlag = 1\n if (m.group('day') != s):\n # capture remaining string\n parseStr = m.group('day')\n chunk1 = s[:m.start('day')]\n chunk2 = s[m.end('day'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Quantity + Units\n m = self.ptc.CRE_UNITS.search(s)\n if m is not None:\n self.unitsFlag = True\n if (m.group('qty') != s):\n # capture remaining string\n parseStr = 
m.group('qty')\n chunk1 = s[:m.start('qty')].strip()\n chunk2 = s[m.end('qty'):].strip()\n\n if chunk1[-1:] == '-':\n parseStr = '-%s' % parseStr\n chunk1 = chunk1[:-1]\n\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Quantity + Units\n m = self.ptc.CRE_QUNITS.search(s)\n if m is not None:\n self.qunitsFlag = True\n\n if (m.group('qty') != s):\n # capture remaining string\n parseStr = m.group('qty')\n chunk1 = s[:m.start('qty')].strip()\n chunk2 = s[m.end('qty'):].strip()\n\n if chunk1[-1:] == '-':\n parseStr = '-%s' % parseStr\n chunk1 = chunk1[:-1]\n\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s \n\n if parseStr == '':\n # Weekday\n m = self.ptc.CRE_WEEKDAY.search(s)\n if m is not None:\n gv = m.group('weekday')\n if s not in self.ptc.dayOffsets:\n self.weekdyFlag = True\n self.dateFlag = 1\n if (gv != s):\n # capture remaining string\n parseStr = gv\n chunk1 = s[:m.start('weekday')]\n chunk2 = s[m.end('weekday'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Natural language time strings\n m = self.ptc.CRE_TIME.search(s)\n if m is not None:\n self.timeStrFlag = True\n self.timeFlag = 2\n if (m.group('time') != s):\n # capture remaining string\n parseStr = m.group('time')\n chunk1 = s[:m.start('time')]\n chunk2 = s[m.end('time'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # HH:MM(:SS) am/pm time strings\n m = self.ptc.CRE_TIMEHMS2.search(s)\n if m is not None:\n self.meridianFlag = True\n self.timeFlag = 2\n if m.group('minutes') is not None:\n if m.group('seconds') is not None:\n parseStr = '%s:%s:%s %s' % (m.group('hours'),\n m.group('minutes'),\n m.group('seconds'),\n m.group('meridian'))\n else:\n parseStr = '%s:%s %s' % (m.group('hours'),\n m.group('minutes'),\n m.group('meridian'))\n else:\n parseStr = '%s %s' % (m.group('hours'),\n m.group('meridian'))\n\n chunk1 = 
s[:m.start('hours')]\n chunk2 = s[m.end('meridian'):]\n\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n\n if parseStr == '':\n # HH:MM(:SS) time strings\n m = self.ptc.CRE_TIMEHMS.search(s)\n if m is not None:\n self.timeStdFlag = True\n self.timeFlag = 2\n if m.group('seconds') is not None:\n parseStr = '%s:%s:%s' % (m.group('hours'),\n m.group('minutes'),\n m.group('seconds'))\n chunk1 = s[:m.start('hours')]\n chunk2 = s[m.end('seconds'):]\n else:\n parseStr = '%s:%s' % (m.group('hours'),\n m.group('minutes'))\n chunk1 = s[:m.start('hours')]\n chunk2 = s[m.end('minutes'):]\n\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n\n # if string does not match any regex, empty string to\n # come out of the while loop\n if not flag:\n s = ''\n\n if _debug:\n print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)\n print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \\\n (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)\n print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \\\n (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)\n\n # evaluate the matched string\n if parseStr != '':\n if self.modifierFlag == True:\n t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)\n # t is the unparsed part of the chunks.\n # If it is not date/time, return current\n # totalTime as it is; else return the output\n # after parsing t.\n if (t != '') and (t != None):\n tempDateFlag = self.dateFlag\n tempTimeFlag = self.timeFlag\n (totalTime2, flag) = self.parse(t, totalTime)\n\n if flag == 0 and totalTime is not None:\n self.timeFlag = tempTimeFlag\n self.dateFlag = tempDateFlag\n\n return (totalTime, self.dateFlag + self.timeFlag)\n else:\n return (totalTime2, self.dateFlag + self.timeFlag)\n\n elif self.modifier2Flag == True:\n totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)\n\n if 
invalidFlag == True:\n self.dateFlag = 0\n self.timeFlag = 0\n\n else:\n totalTime = self._evalString(parseStr, totalTime)\n parseStr = ''\n\n # String is not parsed at all\n if totalTime is None or totalTime == sourceTime:\n totalTime = time.localtime()\n self.dateFlag = 0\n self.timeFlag = 0\n\n return (totalTime, self.dateFlag + self.timeFlag)\n"
] | # who nate smith
# when march 2010
# why the done tool
# where midwest usa
import subprocess
from time import mktime
import parsedatetime.parsedatetime as pdt
from Tasks import Task
from Queries import Query
from Config import db_path
def _at_least(k):
    """Build a predicate accepting argument counts of at least *k*."""
    return lambda n: n >= k

# The verbs the CLI understands; match() resolves abbreviated prefixes
# against this list.
commands = ['list', 'done', 'remove', 'add', 'backend']

# Per-command predicate validating the number of positional arguments.
num_args = {
    "list": _at_least(0),
    "done": _at_least(0),
    "remove": _at_least(0),
    "add": _at_least(1),
    "backend": lambda n: n == 0,
}
def run(command, options, args):
    """Run the requested command.

    args is either a list of new task descriptions (for "add") or a list
    of strings to filter existing tasks by (for "list"/"done"/"remove").
    """
    if command == "backend":
        # Drop the user into an interactive sqlite3 shell on the task db.
        subprocess.call(("sqlite3", db_path))
        # Bug fix: previously fell through and ran a pointless Query below.
        return
    if command == "add":
        dp = pdt.Calendar()
        # Parse a natural-language due date (e.g. "tomorrow") into a
        # Unix timestamp; no due date if the option was not supplied.
        due = mktime(dp.parse(options.due)[0]) if options.due else None
        print("added tasks...")
        # Plain loop instead of a throwaway list comprehension: .add() is
        # called purely for its side effect.
        for desc in args:
            Task(desc, due).add()
        return
    filters = args if len(args) else None
    rows = Query(filters, options).find()
    tasks = [Task(r["desc"], r["due"]) for r in rows]
    if command == "list":
        for t in tasks:
            print("\t * %s" % t)
    if command == "done":
        _report(tasks, "done with...", "finished tasks:", "X",
                lambda t: t.done())
    if command == "remove":
        _report(tasks, "remove...", "removed tasks:", "RM",
                lambda t: t.remove())

def _report(tasks, start_msg, summary_msg, tag, action):
    """Apply *action* to every task, then print the ones it affected."""
    print(start_msg)
    # action() runs on each task in order; a truthy return means the task
    # was actually affected (done()/remove() report success that way).
    affected = [t for t in tasks if action(t)]
    if not affected:
        return
    print("")
    print(summary_msg)
    for t in affected:
        print("\t %s %s" % (tag, t))
class ArgError(Exception):
    """Signals a bad CLI invocation: unknown or ambiguous command, or a
    wrong number of arguments for the resolved command."""

    def __init__(self, value):
        # Keep the offending value for programmatic inspection.
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
def match(command, options, args):
    """Disambiguate an abbreviated command (expanding, e.g., "lis" into
    "list") and validate the number of args passed for it.

    Raises ArgError if no command matches the prefix, the prefix is
    ambiguous, or the argument count is wrong for the resolved command.
    """
    build = ""
    possible = commands
    for l in command:
        build += l
        # List comprehension rather than filter(): under Python 3 filter()
        # returns a lazy iterator and the len() calls below would raise
        # TypeError. Behaviour under Python 2 is unchanged.
        possible = [w for w in possible if w.startswith(build)]
    if len(possible) == 0:
        raise ArgError("Command invalid: %s" % command)
    if len(possible) > 1:
        raise ArgError("Ambiguous command: %s" % command)
    command = possible.pop()
    if not num_args[command](len(args)):
        raise ArgError("Bad number of args for command %s" % command)
    return command
|
vilmibm/done | done/Commands.py | match | python | def match(command, options, args):
build = ""
possible = commands
for l in command:
build += l
possible = filter(lambda w: w.startswith(build), possible)
if len(possible) == 0:
raise ArgError("Command invalid: %s" % command)
if len(possible) > 1:
raise ArgError("Ambiguous command: %s" % command)
command = possible.pop()
if not num_args[command](len(args)):
raise ArgError("Bad number of args for command %s" % command)
    return command | disambiguate a command (expanding, e.g., "lis" into "list") and validate the number of args passed for it | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/done/Commands.py#L84-L103 | null | # who nate smith
# when march 2010
# why the done tool
# where midwest usa
import subprocess
from time import mktime
import parsedatetime.parsedatetime as pdt
from Tasks import Task
from Queries import Query
from Config import db_path
def _at_least(k):
    """Build a predicate accepting argument counts of at least *k*."""
    return lambda n: n >= k

# The verbs the CLI understands; match() resolves abbreviated prefixes
# against this list.
commands = ['list', 'done', 'remove', 'add', 'backend']

# Per-command predicate validating the number of positional arguments.
num_args = {
    "list": _at_least(0),
    "done": _at_least(0),
    "remove": _at_least(0),
    "add": _at_least(1),
    "backend": lambda n: n == 0,
}
def run(command, options, args):
    """Run the requested command.

    args is either a list of new task descriptions (for "add") or a list
    of strings to filter existing tasks by (for "list"/"done"/"remove").
    """
    if command == "backend":
        # Drop the user into an interactive sqlite3 shell on the task db.
        subprocess.call(("sqlite3", db_path))
        # Bug fix: previously fell through and ran a pointless Query below.
        return
    if command == "add":
        dp = pdt.Calendar()
        # Parse a natural-language due date (e.g. "tomorrow") into a
        # Unix timestamp; no due date if the option was not supplied.
        due = mktime(dp.parse(options.due)[0]) if options.due else None
        print("added tasks...")
        # Plain loop instead of a throwaway list comprehension: .add() is
        # called purely for its side effect.
        for desc in args:
            Task(desc, due).add()
        return
    filters = args if len(args) else None
    rows = Query(filters, options).find()
    tasks = [Task(r["desc"], r["due"]) for r in rows]
    if command == "list":
        for t in tasks:
            print("\t * %s" % t)
    if command == "done":
        _report(tasks, "done with...", "finished tasks:", "X",
                lambda t: t.done())
    if command == "remove":
        _report(tasks, "remove...", "removed tasks:", "RM",
                lambda t: t.remove())

def _report(tasks, start_msg, summary_msg, tag, action):
    """Apply *action* to every task, then print the ones it affected."""
    print(start_msg)
    # action() runs on each task in order; a truthy return means the task
    # was actually affected (done()/remove() report success that way).
    affected = [t for t in tasks if action(t)]
    if not affected:
        return
    print("")
    print(summary_msg)
    for t in affected:
        print("\t %s %s" % (tag, t))
class ArgError(Exception):
    """Signals a bad CLI invocation: unknown or ambiguous command, or a
    wrong number of arguments for the resolved command."""

    def __init__(self, value):
        # Keep the offending value for programmatic inspection.
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
def match(command, options, args):
    """Disambiguate an abbreviated command (expanding, e.g., "lis" into
    "list") and validate the number of args passed for it.

    Raises ArgError if no command matches the prefix, the prefix is
    ambiguous, or the argument count is wrong for the resolved command.
    """
    build = ""
    possible = commands
    for l in command:
        build += l
        # List comprehension rather than filter(): under Python 3 filter()
        # returns a lazy iterator and the len() calls below would raise
        # TypeError. Behaviour under Python 2 is unchanged.
        possible = [w for w in possible if w.startswith(build)]
    if len(possible) == 0:
        raise ArgError("Command invalid: %s" % command)
    if len(possible) > 1:
        raise ArgError("Ambiguous command: %s" % command)
    command = possible.pop()
    if not num_args[command](len(args)):
        raise ArgError("Bad number of args for command %s" % command)
    return command
|
vilmibm/done | done/termcolor.py | colored | python | def colored(text, color=None, on_color=None, attrs=None):
if os.getenv('ANSI_COLORS_DISABLED') is None:
fmt_str = '\033[1;%dm%s'
if color is not None:
text = fmt_str % (colors[color], text)
if on_color is not None:
text = fmt_str % (highlights[on_color], text)
if attrs is not None:
for attr in attrs:
text = fmt_str % (attributes[attr], text)
reset = '\033[1;m'
text += reset
return text | Colorize text.
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green') | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/done/termcolor.py#L75-L106 | null | # Copyright (C) 2008 Konstantin Lepa <konstantin.lepa@gmail.com>.
#
# This file is part of termcolor.
#
# termcolor is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
#
# termcolor is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License
# along with termcolor. If not, see <http://www.gnu.org/licenses/>.
"""ANSII Color formatting for output in terminal."""
import os
# Explicit public API for `from termcolor import *`. Bug fix: the original
# spelled this __ALL__, which Python ignores entirely — only the lowercase
# __all__ controls star-import exports.
__all__ = [ 'colored' ]
# Text attribute names mapped to their ANSI SGR codes. Codes 3 and 6 are
# deliberately absent (the original built them under placeholder '' keys
# via zip(..., range(1, 9)) and then deleted them); a direct literal says
# the same thing without the build-then-delete hack.
attributes = {
    'bold': 1,
    'dark': 2,
    'underline': 4,
    'blink': 5,
    'reverse': 7,
    'concealed': 8,
}
# Background ("highlight") names mapped to their ANSI SGR codes (40-47).
highlights = {
    'on_grey': 40,
    'on_red': 41,
    'on_green': 42,
    'on_yellow': 43,
    'on_blue': 44,
    'on_magenta': 45,
    'on_cyan': 46,
    'on_white': 47,
}
# Foreground color names mapped to their ANSI SGR codes (30-37).
colors = {
    'grey': 30,
    'red': 31,
    'green': 32,
    'yellow': 33,
    'blue': 34,
    'magenta': 35,
    'cyan': 36,
    'white': 37,
}
# Manual smoke test: running this module directly prints a sample of every
# supported color, highlight, attribute, and a few combinations so the
# escape sequences can be checked by eye in the current terminal.
# NOTE(review): Python 2 `print` statements throughout — this demo will not
# run under Python 3 without converting them to print() calls.
if __name__ == '__main__':
    print 'Current terminal type: ', os.getenv('TERM')
    # Foreground colors (SGR 30-37).
    print 'Test basic colors:'
    print colored('Grey color', 'grey')
    print colored('Red color', 'red')
    print colored('Green color', 'green')
    print colored('Yellow color', 'yellow')
    print colored('Blue color', 'blue')
    print colored('Magenta color', 'magenta')
    print colored('Cyan color', 'cyan')
    print colored('White color', 'white')
    print '-' * 78
    # Background colors (SGR 40-47).
    print 'Test highlights:'
    print colored('On grey color', on_color='on_grey')
    print colored('On red color', on_color='on_red')
    print colored('On green color', on_color='on_green')
    print colored('On yellow color', on_color='on_yellow')
    print colored('On blue color', on_color='on_blue')
    print colored('On magenta color', on_color='on_magenta')
    print colored('On cyan color', on_color='on_cyan')
    print colored('On white color', color='grey', on_color='on_white')
    print '-' * 78
    # Text attributes (bold, dark, underline, blink, reverse, concealed).
    print 'Test attributes:'
    print colored('Bold grey color', 'grey', attrs=['bold'])
    print colored('Dark red color', 'red', attrs=['dark'])
    print colored('Underline green color', 'green', attrs=['underline'])
    print colored('Blink yellow color', 'yellow', attrs=['blink'])
    print colored('Reversed blue color', 'blue', attrs=['reverse'])
    print colored('Concealed Magenta color', 'magenta', attrs=['concealed'])
    print colored('Bold underline reverse cyan color', 'cyan',
            attrs=['bold', 'underline', 'reverse'])
    print colored('Dark blink concealed white color', 'white',
            attrs=['dark', 'blink', 'concealed'])
    print '-' * 78
    # Color + highlight + attribute combined in one call.
    print 'Test mixing:'
    print colored('Underline red on grey color', 'red', 'on_grey',
            ['underline'])
    print colored('Reversed green on red color', 'green', 'on_red', ['reverse'])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.