repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
bbangert/lettuce_webdriver | lettuce_webdriver/webdriver.py | submit_form_action | python | def submit_form_action(step, url):
form = world.browser.find_element_by_xpath(str('//form[@action="%s"]' %
url))
form.submit() | Submit the form having given action URL. | train | https://github.com/bbangert/lettuce_webdriver/blob/d11f8531c43bb7150c316e0dc4ccd083617becf7/lettuce_webdriver/webdriver.py#L326-L332 | null | """Webdriver support for lettuce"""
from lettuce import step, world
from lettuce_webdriver.util import (assert_true,
assert_false,
AssertContextManager,
find_any_field,
find_button,
find_field,
find_option,
option_in_select,
wait_for)
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import (
NoSuchElementException,
StaleElementReferenceException,
NoAlertPresentException,
WebDriverException)
from nose.tools import assert_equals
# pylint:disable=missing-docstring,redefined-outer-name
from css_selector_steps import *
def contains_content(browser, content):
# Search for an element that contains the whole of the text we're looking
# for in it or its subelements, but whose children do NOT contain that
# text - otherwise matches <body> or <html> or other similarly useless
# things.
for elem in browser.find_elements_by_xpath(unicode(
u'//*[contains(normalize-space(.),"{content}") '
u'and not(./*[contains(normalize-space(.),"{content}")])]'
.format(content=content))):
try:
if elem.is_displayed():
return True
except StaleElementReferenceException:
pass
return False
@wait_for
def wait_for_elem(browser, xpath):
return browser.find_elements_by_xpath(str(xpath))
@wait_for
def wait_for_content(browser, content):
return contains_content(browser, content)
## URLS
@step('I visit "(.*?)"$')
def visit(step, url):
with AssertContextManager(step):
world.browser.get(url)
@step('I go to "(.*?)"$')
def goto(step, url):
step.given('I visit "%s"' % url)
## Links
@step('I click "(.*?)"$')
def click(step, name):
with AssertContextManager(step):
elem = world.browser.find_element_by_link_text(name)
elem.click()
@step('I click by id "(.*?)"$')
def click_by_id(step, id_name):
with AssertContextManager(step):
elem = world.browser.find_element_by_xpath(str('id("%s")' % id_name))
elem.click()
@step('I should see a link with the url "(.*?)"$')
def should_see_link(step, link_url):
assert_true(step, world.browser.
find_element_by_xpath(str('//a[@href="%s"]' % link_url)))
@step('I should see a link to "(.*?)" with the url "(.*?)"$')
def should_see_link_text(step, link_text, link_url):
assert_true(step,
world.browser.find_element_by_xpath(str(
'//a[@href="%s"][./text()="%s"]' %
(link_url, link_text))))
@step('I should see a link that contains the text "(.*?)" '
'and the url "(.*?)"$')
def should_include_link_text(step, link_text, link_url):
return world.browser.find_element_by_xpath(str(
'//a[@href="%s"][contains(., "%s")]' %
(link_url, link_text)))
## General
@step('The element with id of "(.*?)" contains "(.*?)"$')
def element_contains(step, element_id, value):
return world.browser.find_element_by_xpath(str(
'id("{id}")[contains(., "{value}")]'.format(
id=element_id, value=value)))
@step('The element with id of "(.*?)" does not contain "(.*?)"$')
def element_not_contains(step, element_id, value):
elem = world.browser.find_elements_by_xpath(str(
'id("{id}")[contains(., "{value}")]'.format(
id=element_id, value=value)))
assert_false(step, elem)
@wait_for
def wait_for_visible_elem(browser, xpath):
elem = browser.find_elements_by_xpath(str(xpath))
if not elem:
return False
return elem[0].is_displayed()
@step(r'I should see an element with id of "(.*?)" within (\d+) seconds?$')
def should_see_id_in_seconds(step, element_id, timeout):
elem = wait_for_visible_elem(world.browser, 'id("%s")' % element_id,
timeout=int(timeout))
assert_true(step, elem)
@step('I should see an element with id of "(.*?)"$')
def should_see_id(step, element_id):
elem = world.browser.find_element_by_xpath(str('id("%s")' % element_id))
assert_true(step, elem.is_displayed())
@step('I should not see an element with id of "(.*?)"$')
def should_not_see_id(step, element_id):
try:
elem = world.browser.find_element_by_xpath(str('id("%s")' %
element_id))
assert_true(step, not elem.is_displayed())
except NoSuchElementException:
pass
@step(r'I should see "([^"]+)" within (\d+) seconds?$')
def should_see_in_seconds(step, text, timeout):
assert_true(step,
wait_for_content(world.browser, text, timeout=int(timeout)))
@step('I should see "([^"]+)"$')
def should_see(step, text):
assert_true(step, contains_content(world.browser, text))
@step('I see "([^"]+)"$')
def see(step, text):
assert_true(step, contains_content(world.browser, text))
@step('I should not see "([^"]+)"$')
def should_not_see(step, text):
assert_true(step, not contains_content(world.browser, text))
@step('I should be at "(.*?)"$')
def url_should_be(step, url):
assert_true(step, url == world.browser.current_url)
## Browser
@step('The browser\'s URL should be "(.*?)"$')
def browser_url_should_be(step, url):
assert_true(step, url == world.browser.current_url)
@step('The browser\'s URL should contain "(.*?)"$')
def url_should_contain(step, url):
assert_true(step, url in world.browser.current_url)
@step('The browser\'s URL should not contain "(.*?)"$')
def url_should_not_contain(step, url):
assert_true(step, url not in world.browser.current_url)
## Forms
@step('I should see a form that goes to "(.*?)"$')
def see_form(step, url):
return world.browser.find_element_by_xpath(str('//form[@action="%s"]' %
url))
DATE_FIELDS = (
'datetime',
'datetime-local',
'date',
)
TEXT_FIELDS = (
'text',
'textarea',
'password',
'month',
'time',
'week',
'number',
'range',
'email',
'url',
'tel',
'color',
)
@step('I fill in "(.*?)" with "(.*?)"$')
def fill_in_textfield(step, field_name, value):
with AssertContextManager(step):
date_field = find_any_field(world.browser,
DATE_FIELDS,
field_name)
if date_field:
field = date_field
else:
field = find_any_field(world.browser,
TEXT_FIELDS,
field_name)
assert_true(step, field,
'Can not find a field named "%s"' % field_name)
if date_field:
field.send_keys(Keys.DELETE)
else:
field.clear()
field.send_keys(value)
@step('I press "(.*?)"$')
def press_button(step, value):
with AssertContextManager(step):
button = find_button(world.browser, value)
button.click()
@step('I click on label "([^"]*)"')
def click_on_label(step, label):
"""
Click on a label
"""
with AssertContextManager(step):
elem = world.browser.find_element_by_xpath(str(
'//label[normalize-space(text()) = "%s"]' % label))
elem.click()
@step(r'Element with id "([^"]*)" should be focused')
def element_focused(step, id):
"""
Check if the element is focused
"""
elem = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
focused = world.browser.switch_to_active_element()
assert_true(step, elem == focused)
@step(r'Element with id "([^"]*)" should not be focused')
def element_not_focused(step, id):
"""
Check if the element is not focused
"""
elem = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
focused = world.browser.switch_to_active_element()
assert_false(step, elem == focused)
@step(r'Input "([^"]*)" (?:has|should have) value "([^"]*)"')
def input_has_value(step, field_name, value):
"""
Check that the form input element has given value.
"""
with AssertContextManager(step):
text_field = find_any_field(world.browser,
DATE_FIELDS + TEXT_FIELDS,
field_name)
assert_false(step, text_field is False,
'Can not find a field named "%s"' % field_name)
assert_equals(text_field.get_attribute('value'), value)
@step(r'I submit the only form')
def submit_the_only_form(step):
"""
Look for a form on the page and submit it.
"""
form = world.browser.find_element_by_xpath(str('//form'))
form.submit()
@step(r'I submit the form with id "([^"]*)"')
def submit_form_id(step, id):
"""
Submit the form having given id.
"""
form = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
form.submit()
@step(r'I submit the form with action "([^"]*)"')
# Checkboxes
@step('I check "(.*?)"$')
def check_checkbox(step, value):
with AssertContextManager(step):
check_box = find_field(world.browser, 'checkbox', value)
if not check_box.is_selected():
check_box.click()
@step('I uncheck "(.*?)"$')
def uncheck_checkbox(step, value):
with AssertContextManager(step):
check_box = find_field(world.browser, 'checkbox', value)
if check_box.is_selected():
check_box.click()
@step('The "(.*?)" checkbox should be checked$')
def assert_checked_checkbox(step, value):
check_box = find_field(world.browser, 'checkbox', value)
assert_true(step, check_box.is_selected())
@step('The "(.*?)" checkbox should not be checked$')
def assert_not_checked_checkbox(step, value):
check_box = find_field(world.browser, 'checkbox', value)
assert_true(step, not check_box.is_selected())
# Selectors
@step('I select "(.*?)" from "(.*?)"$')
def select_single_item(step, option_name, select_name):
with AssertContextManager(step):
option_box = find_option(world.browser, select_name, option_name)
option_box.click()
@step('I select the following from "([^"]*?)":?$')
def select_multi_items(step, select_name):
with AssertContextManager(step):
# Ensure only the options selected are actually selected
option_names = step.multiline.split('\n')
select_box = find_field(world.browser, 'select', select_name)
select = Select(select_box)
select.deselect_all()
for option in option_names:
try:
select.select_by_value(option)
except NoSuchElementException:
select.select_by_visible_text(option)
@step('The "(.*?)" option from "(.*?)" should be selected$')
def assert_single_selected(step, option_name, select_name):
option_box = find_option(world.browser, select_name, option_name)
assert_true(step, option_box.is_selected())
@step('The following options from "([^"]*?)" should be selected:?$')
def assert_multi_selected(step, select_name):
with AssertContextManager(step):
# Ensure its not selected unless its one of our options
option_names = step.multiline.split('\n')
select_box = find_field(world.browser, 'select', select_name)
option_elems = select_box.find_elements_by_xpath(str('./option'))
for option in option_elems:
if option.get_attribute('id') in option_names or \
option.get_attribute('name') in option_names or \
option.get_attribute('value') in option_names or \
option.text in option_names:
assert_true(step, option.is_selected())
else:
assert_true(step, not option.is_selected())
@step(r'I should see option "([^"]*)" in selector "([^"]*)"')
def select_contains(step, option, id_):
assert_true(step, option_in_select(world.browser, id_, option) is not None)
@step(r'I should not see option "([^"]*)" in selector "([^"]*)"')
def select_does_not_contain(step, option, id_):
assert_true(step, option_in_select(world.browser, id_, option) is None)
## Radios
@step('I choose "(.*?)"$')
def choose_radio(step, value):
with AssertContextManager(step):
box = find_field(world.browser, 'radio', value)
box.click()
@step('The "(.*?)" option should be chosen$')
def assert_radio_selected(step, value):
box = find_field(world.browser, 'radio', value)
assert_true(step, box.is_selected())
@step('The "(.*?)" option should not be chosen$')
def assert_radio_not_selected(step, value):
box = find_field(world.browser, 'radio', value)
assert_true(step, not box.is_selected())
# Alerts
@step('I accept the alert')
def accept_alert(step):
"""
Accept the alert
"""
try:
alert = Alert(world.browser)
alert.accept()
except WebDriverException:
# PhantomJS is kinda poor
pass
@step('I dismiss the alert')
def dismiss_alert(step):
"""
Dismiss the alert
"""
try:
alert = Alert(world.browser)
alert.dismiss()
except WebDriverException:
# PhantomJS is kinda poor
pass
@step(r'I should see an alert with text "([^"]*)"')
def check_alert(step, text):
"""
Check the alert text
"""
try:
alert = Alert(world.browser)
assert_equals(alert.text, text)
except WebDriverException:
# PhantomJS is kinda poor
pass
@step('I should not see an alert')
def check_no_alert(step):
"""
Check there is no alert
"""
try:
alert = Alert(world.browser)
raise AssertionError("Should not see an alert. Alert '%s' shown." %
alert.text)
except NoAlertPresentException:
pass
# Tooltips
@step(r'I should see an element with tooltip "([^"]*)"')
def see_tooltip(step, tooltip):
"""
Press a button having a given tooltip.
"""
elem = world.browser.find_elements_by_xpath(str(
'//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
dict(tooltip=tooltip)))
elem = [e for e in elem if e.is_displayed()]
assert_true(step, elem)
@step(r'I should not see an element with tooltip "([^"]*)"')
def no_see_tooltip(step, tooltip):
"""
Press a button having a given tooltip.
"""
elem = world.browser.find_elements_by_xpath(str(
'//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
dict(tooltip=tooltip)))
elem = [e for e in elem if e.is_displayed()]
assert_true(step, not elem)
@step(r'I (?:click|press) the element with tooltip "([^"]*)"')
def press_by_tooltip(step, tooltip):
"""
Press a button having a given tooltip.
"""
with AssertContextManager(step):
for button in world.browser.find_elements_by_xpath(str(
'//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
dict(tooltip=tooltip))):
try:
button.click()
break
except Exception:
pass
else:
raise AssertionError("No button with tooltip '{0}' found"
.format(tooltip))
@step(r'The page title should be "([^"]*)"')
def page_title(step, title):
"""
Check that the page title matches the given one.
"""
with AssertContextManager(step):
assert_equals(world.browser.title, title)
@step(r'I switch to the frame with id "([^"]*)"')
def switch_to_frame(self, frame):
elem = world.browser.find_element_by_id(frame)
world.browser.switch_to_frame(elem)
@step(r'I switch back to the main view')
def switch_to_main(self):
world.browser.switch_to_default_content()
|
bbangert/lettuce_webdriver | lettuce_webdriver/webdriver.py | check_alert | python | def check_alert(step, text):
try:
alert = Alert(world.browser)
assert_equals(alert.text, text)
except WebDriverException:
# PhantomJS is kinda poor
pass | Check the alert text | train | https://github.com/bbangert/lettuce_webdriver/blob/d11f8531c43bb7150c316e0dc4ccd083617becf7/lettuce_webdriver/webdriver.py#L472-L482 | null | """Webdriver support for lettuce"""
from lettuce import step, world
from lettuce_webdriver.util import (assert_true,
assert_false,
AssertContextManager,
find_any_field,
find_button,
find_field,
find_option,
option_in_select,
wait_for)
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import (
NoSuchElementException,
StaleElementReferenceException,
NoAlertPresentException,
WebDriverException)
from nose.tools import assert_equals
# pylint:disable=missing-docstring,redefined-outer-name
from css_selector_steps import *
def contains_content(browser, content):
# Search for an element that contains the whole of the text we're looking
# for in it or its subelements, but whose children do NOT contain that
# text - otherwise matches <body> or <html> or other similarly useless
# things.
for elem in browser.find_elements_by_xpath(unicode(
u'//*[contains(normalize-space(.),"{content}") '
u'and not(./*[contains(normalize-space(.),"{content}")])]'
.format(content=content))):
try:
if elem.is_displayed():
return True
except StaleElementReferenceException:
pass
return False
@wait_for
def wait_for_elem(browser, xpath):
return browser.find_elements_by_xpath(str(xpath))
@wait_for
def wait_for_content(browser, content):
return contains_content(browser, content)
## URLS
@step('I visit "(.*?)"$')
def visit(step, url):
with AssertContextManager(step):
world.browser.get(url)
@step('I go to "(.*?)"$')
def goto(step, url):
step.given('I visit "%s"' % url)
## Links
@step('I click "(.*?)"$')
def click(step, name):
with AssertContextManager(step):
elem = world.browser.find_element_by_link_text(name)
elem.click()
@step('I click by id "(.*?)"$')
def click_by_id(step, id_name):
with AssertContextManager(step):
elem = world.browser.find_element_by_xpath(str('id("%s")' % id_name))
elem.click()
@step('I should see a link with the url "(.*?)"$')
def should_see_link(step, link_url):
assert_true(step, world.browser.
find_element_by_xpath(str('//a[@href="%s"]' % link_url)))
@step('I should see a link to "(.*?)" with the url "(.*?)"$')
def should_see_link_text(step, link_text, link_url):
assert_true(step,
world.browser.find_element_by_xpath(str(
'//a[@href="%s"][./text()="%s"]' %
(link_url, link_text))))
@step('I should see a link that contains the text "(.*?)" '
'and the url "(.*?)"$')
def should_include_link_text(step, link_text, link_url):
return world.browser.find_element_by_xpath(str(
'//a[@href="%s"][contains(., "%s")]' %
(link_url, link_text)))
## General
@step('The element with id of "(.*?)" contains "(.*?)"$')
def element_contains(step, element_id, value):
return world.browser.find_element_by_xpath(str(
'id("{id}")[contains(., "{value}")]'.format(
id=element_id, value=value)))
@step('The element with id of "(.*?)" does not contain "(.*?)"$')
def element_not_contains(step, element_id, value):
elem = world.browser.find_elements_by_xpath(str(
'id("{id}")[contains(., "{value}")]'.format(
id=element_id, value=value)))
assert_false(step, elem)
@wait_for
def wait_for_visible_elem(browser, xpath):
elem = browser.find_elements_by_xpath(str(xpath))
if not elem:
return False
return elem[0].is_displayed()
@step(r'I should see an element with id of "(.*?)" within (\d+) seconds?$')
def should_see_id_in_seconds(step, element_id, timeout):
elem = wait_for_visible_elem(world.browser, 'id("%s")' % element_id,
timeout=int(timeout))
assert_true(step, elem)
@step('I should see an element with id of "(.*?)"$')
def should_see_id(step, element_id):
elem = world.browser.find_element_by_xpath(str('id("%s")' % element_id))
assert_true(step, elem.is_displayed())
@step('I should not see an element with id of "(.*?)"$')
def should_not_see_id(step, element_id):
try:
elem = world.browser.find_element_by_xpath(str('id("%s")' %
element_id))
assert_true(step, not elem.is_displayed())
except NoSuchElementException:
pass
@step(r'I should see "([^"]+)" within (\d+) seconds?$')
def should_see_in_seconds(step, text, timeout):
assert_true(step,
wait_for_content(world.browser, text, timeout=int(timeout)))
@step('I should see "([^"]+)"$')
def should_see(step, text):
assert_true(step, contains_content(world.browser, text))
@step('I see "([^"]+)"$')
def see(step, text):
assert_true(step, contains_content(world.browser, text))
@step('I should not see "([^"]+)"$')
def should_not_see(step, text):
assert_true(step, not contains_content(world.browser, text))
@step('I should be at "(.*?)"$')
def url_should_be(step, url):
assert_true(step, url == world.browser.current_url)
## Browser
@step('The browser\'s URL should be "(.*?)"$')
def browser_url_should_be(step, url):
assert_true(step, url == world.browser.current_url)
@step('The browser\'s URL should contain "(.*?)"$')
def url_should_contain(step, url):
assert_true(step, url in world.browser.current_url)
@step('The browser\'s URL should not contain "(.*?)"$')
def url_should_not_contain(step, url):
assert_true(step, url not in world.browser.current_url)
## Forms
@step('I should see a form that goes to "(.*?)"$')
def see_form(step, url):
return world.browser.find_element_by_xpath(str('//form[@action="%s"]' %
url))
DATE_FIELDS = (
'datetime',
'datetime-local',
'date',
)
TEXT_FIELDS = (
'text',
'textarea',
'password',
'month',
'time',
'week',
'number',
'range',
'email',
'url',
'tel',
'color',
)
@step('I fill in "(.*?)" with "(.*?)"$')
def fill_in_textfield(step, field_name, value):
with AssertContextManager(step):
date_field = find_any_field(world.browser,
DATE_FIELDS,
field_name)
if date_field:
field = date_field
else:
field = find_any_field(world.browser,
TEXT_FIELDS,
field_name)
assert_true(step, field,
'Can not find a field named "%s"' % field_name)
if date_field:
field.send_keys(Keys.DELETE)
else:
field.clear()
field.send_keys(value)
@step('I press "(.*?)"$')
def press_button(step, value):
with AssertContextManager(step):
button = find_button(world.browser, value)
button.click()
@step('I click on label "([^"]*)"')
def click_on_label(step, label):
"""
Click on a label
"""
with AssertContextManager(step):
elem = world.browser.find_element_by_xpath(str(
'//label[normalize-space(text()) = "%s"]' % label))
elem.click()
@step(r'Element with id "([^"]*)" should be focused')
def element_focused(step, id):
"""
Check if the element is focused
"""
elem = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
focused = world.browser.switch_to_active_element()
assert_true(step, elem == focused)
@step(r'Element with id "([^"]*)" should not be focused')
def element_not_focused(step, id):
"""
Check if the element is not focused
"""
elem = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
focused = world.browser.switch_to_active_element()
assert_false(step, elem == focused)
@step(r'Input "([^"]*)" (?:has|should have) value "([^"]*)"')
def input_has_value(step, field_name, value):
"""
Check that the form input element has given value.
"""
with AssertContextManager(step):
text_field = find_any_field(world.browser,
DATE_FIELDS + TEXT_FIELDS,
field_name)
assert_false(step, text_field is False,
'Can not find a field named "%s"' % field_name)
assert_equals(text_field.get_attribute('value'), value)
@step(r'I submit the only form')
def submit_the_only_form(step):
"""
Look for a form on the page and submit it.
"""
form = world.browser.find_element_by_xpath(str('//form'))
form.submit()
@step(r'I submit the form with id "([^"]*)"')
def submit_form_id(step, id):
"""
Submit the form having given id.
"""
form = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
form.submit()
@step(r'I submit the form with action "([^"]*)"')
def submit_form_action(step, url):
"""
Submit the form having given action URL.
"""
form = world.browser.find_element_by_xpath(str('//form[@action="%s"]' %
url))
form.submit()
# Checkboxes
@step('I check "(.*?)"$')
def check_checkbox(step, value):
with AssertContextManager(step):
check_box = find_field(world.browser, 'checkbox', value)
if not check_box.is_selected():
check_box.click()
@step('I uncheck "(.*?)"$')
def uncheck_checkbox(step, value):
with AssertContextManager(step):
check_box = find_field(world.browser, 'checkbox', value)
if check_box.is_selected():
check_box.click()
@step('The "(.*?)" checkbox should be checked$')
def assert_checked_checkbox(step, value):
check_box = find_field(world.browser, 'checkbox', value)
assert_true(step, check_box.is_selected())
@step('The "(.*?)" checkbox should not be checked$')
def assert_not_checked_checkbox(step, value):
check_box = find_field(world.browser, 'checkbox', value)
assert_true(step, not check_box.is_selected())
# Selectors
@step('I select "(.*?)" from "(.*?)"$')
def select_single_item(step, option_name, select_name):
with AssertContextManager(step):
option_box = find_option(world.browser, select_name, option_name)
option_box.click()
@step('I select the following from "([^"]*?)":?$')
def select_multi_items(step, select_name):
with AssertContextManager(step):
# Ensure only the options selected are actually selected
option_names = step.multiline.split('\n')
select_box = find_field(world.browser, 'select', select_name)
select = Select(select_box)
select.deselect_all()
for option in option_names:
try:
select.select_by_value(option)
except NoSuchElementException:
select.select_by_visible_text(option)
@step('The "(.*?)" option from "(.*?)" should be selected$')
def assert_single_selected(step, option_name, select_name):
option_box = find_option(world.browser, select_name, option_name)
assert_true(step, option_box.is_selected())
@step('The following options from "([^"]*?)" should be selected:?$')
def assert_multi_selected(step, select_name):
with AssertContextManager(step):
# Ensure its not selected unless its one of our options
option_names = step.multiline.split('\n')
select_box = find_field(world.browser, 'select', select_name)
option_elems = select_box.find_elements_by_xpath(str('./option'))
for option in option_elems:
if option.get_attribute('id') in option_names or \
option.get_attribute('name') in option_names or \
option.get_attribute('value') in option_names or \
option.text in option_names:
assert_true(step, option.is_selected())
else:
assert_true(step, not option.is_selected())
@step(r'I should see option "([^"]*)" in selector "([^"]*)"')
def select_contains(step, option, id_):
assert_true(step, option_in_select(world.browser, id_, option) is not None)
@step(r'I should not see option "([^"]*)" in selector "([^"]*)"')
def select_does_not_contain(step, option, id_):
assert_true(step, option_in_select(world.browser, id_, option) is None)
## Radios
@step('I choose "(.*?)"$')
def choose_radio(step, value):
with AssertContextManager(step):
box = find_field(world.browser, 'radio', value)
box.click()
@step('The "(.*?)" option should be chosen$')
def assert_radio_selected(step, value):
box = find_field(world.browser, 'radio', value)
assert_true(step, box.is_selected())
@step('The "(.*?)" option should not be chosen$')
def assert_radio_not_selected(step, value):
box = find_field(world.browser, 'radio', value)
assert_true(step, not box.is_selected())
# Alerts
@step('I accept the alert')
def accept_alert(step):
"""
Accept the alert
"""
try:
alert = Alert(world.browser)
alert.accept()
except WebDriverException:
# PhantomJS is kinda poor
pass
@step('I dismiss the alert')
def dismiss_alert(step):
"""
Dismiss the alert
"""
try:
alert = Alert(world.browser)
alert.dismiss()
except WebDriverException:
# PhantomJS is kinda poor
pass
@step(r'I should see an alert with text "([^"]*)"')
@step('I should not see an alert')
def check_no_alert(step):
"""
Check there is no alert
"""
try:
alert = Alert(world.browser)
raise AssertionError("Should not see an alert. Alert '%s' shown." %
alert.text)
except NoAlertPresentException:
pass
# Tooltips
@step(r'I should see an element with tooltip "([^"]*)"')
def see_tooltip(step, tooltip):
"""
Press a button having a given tooltip.
"""
elem = world.browser.find_elements_by_xpath(str(
'//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
dict(tooltip=tooltip)))
elem = [e for e in elem if e.is_displayed()]
assert_true(step, elem)
@step(r'I should not see an element with tooltip "([^"]*)"')
def no_see_tooltip(step, tooltip):
"""
Press a button having a given tooltip.
"""
elem = world.browser.find_elements_by_xpath(str(
'//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
dict(tooltip=tooltip)))
elem = [e for e in elem if e.is_displayed()]
assert_true(step, not elem)
@step(r'I (?:click|press) the element with tooltip "([^"]*)"')
def press_by_tooltip(step, tooltip):
"""
Press a button having a given tooltip.
"""
with AssertContextManager(step):
for button in world.browser.find_elements_by_xpath(str(
'//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
dict(tooltip=tooltip))):
try:
button.click()
break
except Exception:
pass
else:
raise AssertionError("No button with tooltip '{0}' found"
.format(tooltip))
@step(r'The page title should be "([^"]*)"')
def page_title(step, title):
"""
Check that the page title matches the given one.
"""
with AssertContextManager(step):
assert_equals(world.browser.title, title)
@step(r'I switch to the frame with id "([^"]*)"')
def switch_to_frame(self, frame):
elem = world.browser.find_element_by_id(frame)
world.browser.switch_to_frame(elem)
@step(r'I switch back to the main view')
def switch_to_main(self):
world.browser.switch_to_default_content()
|
bbangert/lettuce_webdriver | lettuce_webdriver/webdriver.py | see_tooltip | python | def see_tooltip(step, tooltip):
elem = world.browser.find_elements_by_xpath(str(
'//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
dict(tooltip=tooltip)))
elem = [e for e in elem if e.is_displayed()]
assert_true(step, elem) | Press a button having a given tooltip. | train | https://github.com/bbangert/lettuce_webdriver/blob/d11f8531c43bb7150c316e0dc4ccd083617becf7/lettuce_webdriver/webdriver.py#L501-L509 | [
"def assert_true(step, exp, msg=None):\n with AssertContextManager(step):\n nose_assert_true(exp, msg)\n"
] | """Webdriver support for lettuce"""
from lettuce import step, world
from lettuce_webdriver.util import (assert_true,
assert_false,
AssertContextManager,
find_any_field,
find_button,
find_field,
find_option,
option_in_select,
wait_for)
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import (
NoSuchElementException,
StaleElementReferenceException,
NoAlertPresentException,
WebDriverException)
from nose.tools import assert_equals
# pylint:disable=missing-docstring,redefined-outer-name
from css_selector_steps import *
def contains_content(browser, content):
# Search for an element that contains the whole of the text we're looking
# for in it or its subelements, but whose children do NOT contain that
# text - otherwise matches <body> or <html> or other similarly useless
# things.
for elem in browser.find_elements_by_xpath(unicode(
u'//*[contains(normalize-space(.),"{content}") '
u'and not(./*[contains(normalize-space(.),"{content}")])]'
.format(content=content))):
try:
if elem.is_displayed():
return True
except StaleElementReferenceException:
pass
return False
@wait_for
def wait_for_elem(browser, xpath):
return browser.find_elements_by_xpath(str(xpath))
@wait_for
def wait_for_content(browser, content):
return contains_content(browser, content)
## URLS
@step('I visit "(.*?)"$')
def visit(step, url):
with AssertContextManager(step):
world.browser.get(url)
@step('I go to "(.*?)"$')
def goto(step, url):
step.given('I visit "%s"' % url)
## Links
@step('I click "(.*?)"$')
def click(step, name):
with AssertContextManager(step):
elem = world.browser.find_element_by_link_text(name)
elem.click()
@step('I click by id "(.*?)"$')
def click_by_id(step, id_name):
with AssertContextManager(step):
elem = world.browser.find_element_by_xpath(str('id("%s")' % id_name))
elem.click()
@step('I should see a link with the url "(.*?)"$')
def should_see_link(step, link_url):
assert_true(step, world.browser.
find_element_by_xpath(str('//a[@href="%s"]' % link_url)))
@step('I should see a link to "(.*?)" with the url "(.*?)"$')
def should_see_link_text(step, link_text, link_url):
assert_true(step,
world.browser.find_element_by_xpath(str(
'//a[@href="%s"][./text()="%s"]' %
(link_url, link_text))))
@step('I should see a link that contains the text "(.*?)" '
'and the url "(.*?)"$')
def should_include_link_text(step, link_text, link_url):
return world.browser.find_element_by_xpath(str(
'//a[@href="%s"][contains(., "%s")]' %
(link_url, link_text)))
## General
@step('The element with id of "(.*?)" contains "(.*?)"$')
def element_contains(step, element_id, value):
return world.browser.find_element_by_xpath(str(
'id("{id}")[contains(., "{value}")]'.format(
id=element_id, value=value)))
@step('The element with id of "(.*?)" does not contain "(.*?)"$')
def element_not_contains(step, element_id, value):
elem = world.browser.find_elements_by_xpath(str(
'id("{id}")[contains(., "{value}")]'.format(
id=element_id, value=value)))
assert_false(step, elem)
@wait_for
def wait_for_visible_elem(browser, xpath):
    """Return True once the first element matching *xpath* is displayed
    (wrapped by ``wait_for``, which presumably retries until timeout)."""
    elem = browser.find_elements_by_xpath(str(xpath))
    if not elem:
        return False
    return elem[0].is_displayed()
@step(r'I should see an element with id of "(.*?)" within (\d+) seconds?$')
def should_see_id_in_seconds(step, element_id, timeout):
    """Assert the element with *element_id* becomes visible within
    *timeout* seconds."""
    elem = wait_for_visible_elem(world.browser, 'id("%s")' % element_id,
                                 timeout=int(timeout))
    assert_true(step, elem)
@step('I should see an element with id of "(.*?)"$')
def should_see_id(step, element_id):
    """Assert the element with *element_id* exists and is displayed."""
    elem = world.browser.find_element_by_xpath(str('id("%s")' % element_id))
    assert_true(step, elem.is_displayed())
@step('I should not see an element with id of "(.*?)"$')
def should_not_see_id(step, element_id):
    """Assert the element with *element_id* is absent or hidden."""
    try:
        elem = world.browser.find_element_by_xpath(str('id("%s")' %
                                                       element_id))
        assert_true(step, not elem.is_displayed())
    except NoSuchElementException:
        # A missing element also satisfies "not seen".
        pass
@step(r'I should see "([^"]+)" within (\d+) seconds?$')
def should_see_in_seconds(step, text, timeout):
    """Assert *text* becomes visible on the page within *timeout* seconds."""
    assert_true(step,
                wait_for_content(world.browser, text, timeout=int(timeout)))
@step('I should see "([^"]+)"$')
def should_see(step, text):
    """Assert *text* is currently visible somewhere on the page."""
    assert_true(step, contains_content(world.browser, text))
@step('I see "([^"]+)"$')
def see(step, text):
    """Alternate phrasing of ``I should see``; same assertion."""
    assert_true(step, contains_content(world.browser, text))
@step('I should not see "([^"]+)"$')
def should_not_see(step, text):
    """Assert *text* is not visible anywhere on the page."""
    assert_true(step, not contains_content(world.browser, text))
@step('I should be at "(.*?)"$')
def url_should_be(step, url):
    """Assert the browser's current URL is exactly *url*."""
    assert_true(step, url == world.browser.current_url)
## Browser
@step('The browser\'s URL should be "(.*?)"$')
def browser_url_should_be(step, url):
    """Assert the browser's current URL is exactly *url*."""
    assert_true(step, url == world.browser.current_url)
@step('The browser\'s URL should contain "(.*?)"$')
def url_should_contain(step, url):
    """Assert *url* occurs as a substring of the browser's current URL."""
    current_url = world.browser.current_url
    assert_true(step, url in current_url)
@step('The browser\'s URL should not contain "(.*?)"$')
def url_should_not_contain(step, url):
    """Assert *url* does not occur in the browser's current URL."""
    current_url = world.browser.current_url
    assert_true(step, url not in current_url)
## Forms
@step('I should see a form that goes to "(.*?)"$')
def see_form(step, url):
    """Assert a form with action *url* exists (find_element raises if not)."""
    return world.browser.find_element_by_xpath(str('//form[@action="%s"]' %
                                                   url))
# <input type=...> values treated as date-like inputs; fill_in_textfield
# clears these with a DELETE keypress instead of WebElement.clear().
DATE_FIELDS = (
    'datetime',
    'datetime-local',
    'date',
)
# <input type=...> values (plus textarea) treated as plain text inputs;
# fill_in_textfield clears these with WebElement.clear() before typing.
TEXT_FIELDS = (
    'text',
    'textarea',
    'password',
    'month',
    'time',
    'week',
    'number',
    'range',
    'email',
    'url',
    'tel',
    'color',
)
@step('I fill in "(.*?)" with "(.*?)"$')
def fill_in_textfield(step, field_name, value):
    """Clear the named input field and type *value* into it.

    Date-like fields (DATE_FIELDS) are cleared with a DELETE keypress;
    ordinary text fields (TEXT_FIELDS) use WebElement.clear().
    """
    with AssertContextManager(step):
        # Look for a date-type input first; fall back to text-type inputs.
        date_field = find_any_field(world.browser,
                                    DATE_FIELDS,
                                    field_name)
        if date_field:
            field = date_field
        else:
            field = find_any_field(world.browser,
                                   TEXT_FIELDS,
                                   field_name)
        assert_true(step, field,
                    'Can not find a field named "%s"' % field_name)
        if date_field:
            field.send_keys(Keys.DELETE)
        else:
            field.clear()
        field.send_keys(value)
@step('I press "(.*?)"$')
def press_button(step, value):
    """Click the button identified by *value* (see util.find_button)."""
    with AssertContextManager(step):
        button = find_button(world.browser, value)
        button.click()
@step('I click on label "([^"]*)"')
def click_on_label(step, label):
    """
    Click the <label> whose (whitespace-normalized) text is *label*.
    """
    with AssertContextManager(step):
        elem = world.browser.find_element_by_xpath(str(
            '//label[normalize-space(text()) = "%s"]' % label))
        elem.click()
@step(r'Element with id "([^"]*)" should be focused')
def element_focused(step, id):
    """
    Assert that the element with the given id currently has focus.
    """
    elem = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
    focused = world.browser.switch_to_active_element()
    assert_true(step, elem == focused)
@step(r'Element with id "([^"]*)" should not be focused')
def element_not_focused(step, id):
    """
    Assert that the element with the given id does not have focus.
    """
    elem = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
    focused = world.browser.switch_to_active_element()
    assert_false(step, elem == focused)
@step(r'Input "([^"]*)" (?:has|should have) value "([^"]*)"')
def input_has_value(step, field_name, value):
    """
    Assert that the named date/text input's ``value`` attribute equals
    *value* exactly.
    """
    with AssertContextManager(step):
        text_field = find_any_field(world.browser,
                                    DATE_FIELDS + TEXT_FIELDS,
                                    field_name)
        assert_false(step, text_field is False,
                     'Can not find a field named "%s"' % field_name)
        assert_equals(text_field.get_attribute('value'), value)
@step(r'I submit the only form')
def submit_the_only_form(step):
    """
    Submit the first (assumed only) form on the page.
    """
    form = world.browser.find_element_by_xpath(str('//form'))
    form.submit()
@step(r'I submit the form with id "([^"]*)"')
def submit_form_id(step, id):
    """
    Submit the form having the given id.
    """
    form = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
    form.submit()
@step(r'I submit the form with action "([^"]*)"')
def submit_form_action(step, url):
    """
    Submit the form whose action attribute is exactly *url*.
    """
    xpath = '//form[@action="%s"]' % url
    world.browser.find_element_by_xpath(str(xpath)).submit()
# Checkboxes
@step('I check "(.*?)"$')
def check_checkbox(step, value):
    """Ensure the named checkbox is checked (click only if unchecked)."""
    with AssertContextManager(step):
        check_box = find_field(world.browser, 'checkbox', value)
        if not check_box.is_selected():
            check_box.click()
@step('I uncheck "(.*?)"$')
def uncheck_checkbox(step, value):
    """Ensure the named checkbox is unchecked (click only if checked)."""
    with AssertContextManager(step):
        check_box = find_field(world.browser, 'checkbox', value)
        if check_box.is_selected():
            check_box.click()
@step('The "(.*?)" checkbox should be checked$')
def assert_checked_checkbox(step, value):
    """Assert the named checkbox is currently checked."""
    check_box = find_field(world.browser, 'checkbox', value)
    assert_true(step, check_box.is_selected())
@step('The "(.*?)" checkbox should not be checked$')
def assert_not_checked_checkbox(step, value):
    """Assert the named checkbox is currently unchecked."""
    check_box = find_field(world.browser, 'checkbox', value)
    assert_true(step, not check_box.is_selected())
# Selectors
@step('I select "(.*?)" from "(.*?)"$')
def select_single_item(step, option_name, select_name):
    """Select one option (by clicking it) from the named <select>."""
    with AssertContextManager(step):
        option_box = find_option(world.browser, select_name, option_name)
        option_box.click()
@step('I select the following from "([^"]*?)":?$')
def select_multi_items(step, select_name):
    """Select exactly the options listed in the step's multiline block.

    All prior selections are cleared first; deselect_all presumably
    requires the <select> to be a multi-select — TODO confirm.
    """
    with AssertContextManager(step):
        # Ensure only the options selected are actually selected
        option_names = step.multiline.split('\n')
        select_box = find_field(world.browser, 'select', select_name)
        select = Select(select_box)
        select.deselect_all()
        for option in option_names:
            # Prefer matching by value; fall back to the visible text.
            try:
                select.select_by_value(option)
            except NoSuchElementException:
                select.select_by_visible_text(option)
@step('The "(.*?)" option from "(.*?)" should be selected$')
def assert_single_selected(step, option_name, select_name):
    """Assert the named option of the named <select> is selected."""
    option_box = find_option(world.browser, select_name, option_name)
    assert_true(step, option_box.is_selected())
@step('The following options from "([^"]*?)" should be selected:?$')
def assert_multi_selected(step, select_name):
    """Assert that exactly the listed options are selected.

    Each <option> is matched against the multiline list by id, name,
    value or visible text; listed options must be selected and every
    other option must be deselected.
    """
    with AssertContextManager(step):
        # Ensure its not selected unless its one of our options
        option_names = step.multiline.split('\n')
        select_box = find_field(world.browser, 'select', select_name)
        option_elems = select_box.find_elements_by_xpath(str('./option'))
        for option in option_elems:
            if option.get_attribute('id') in option_names or \
               option.get_attribute('name') in option_names or \
               option.get_attribute('value') in option_names or \
               option.text in option_names:
                assert_true(step, option.is_selected())
            else:
                assert_true(step, not option.is_selected())
@step(r'I should see option "([^"]*)" in selector "([^"]*)"')
def select_contains(step, option, id_):
    """Assert the <select> with id *id_* offers the given option."""
    assert_true(step, option_in_select(world.browser, id_, option) is not None)
@step(r'I should not see option "([^"]*)" in selector "([^"]*)"')
def select_does_not_contain(step, option, id_):
    """Assert the <select> with id *id_* does not offer the given option."""
    assert_true(step, option_in_select(world.browser, id_, option) is None)
## Radios
@step('I choose "(.*?)"$')
def choose_radio(step, value):
    """Click the named radio button."""
    with AssertContextManager(step):
        box = find_field(world.browser, 'radio', value)
        box.click()
@step('The "(.*?)" option should be chosen$')
def assert_radio_selected(step, value):
    """Assert the named radio button is selected."""
    box = find_field(world.browser, 'radio', value)
    assert_true(step, box.is_selected())
@step('The "(.*?)" option should not be chosen$')
def assert_radio_not_selected(step, value):
    """Assert the named radio button is not selected."""
    box = find_field(world.browser, 'radio', value)
    assert_true(step, not box.is_selected())
# Alerts
@step('I accept the alert')
def accept_alert(step):
    """
    Accept (confirm) the currently open alert, if the driver supports it.
    """
    try:
        alert = Alert(world.browser)
        alert.accept()
    except WebDriverException:
        # PhantomJS is kinda poor — it lacks alert support, so ignore.
        pass
@step('I dismiss the alert')
def dismiss_alert(step):
    """
    Dismiss (cancel) the currently open alert, if the driver supports it.
    """
    try:
        alert = Alert(world.browser)
        alert.dismiss()
    except WebDriverException:
        # PhantomJS is kinda poor — it lacks alert support, so ignore.
        pass
@step(r'I should see an alert with text "([^"]*)"')
def check_alert(step, text):
    """
    Assert the open alert's text equals *text* (skipped on drivers
    without alert support).
    """
    try:
        alert = Alert(world.browser)
        assert_equals(alert.text, text)
    except WebDriverException:
        # PhantomJS is kinda poor — it lacks alert support, so ignore.
        pass
@step('I should not see an alert')
def check_no_alert(step):
    """
    Assert that no alert is currently open.
    """
    try:
        alert = Alert(world.browser)
        # Reading alert.text raises NoAlertPresentException when no alert
        # exists; if it succeeds, an alert is open and the step fails.
        raise AssertionError("Should not see an alert. Alert '%s' shown." %
                             alert.text)
    except NoAlertPresentException:
        pass
# Tooltips
@step(r'I should see an element with tooltip "([^"]*)"')
def see_tooltip(step, tooltip):
    """
    Assert that at least one visible element carries the given tooltip
    (via ``title`` or Bootstrap's ``data-original-title``).
    """
    # BUG FIX: both the positive and negative step sentences were bound to
    # no_see_tooltip, so "I should see ..." asserted the element's ABSENCE.
    # Restore the positive step as its own function.
    elem = world.browser.find_elements_by_xpath(str(
        '//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
        dict(tooltip=tooltip)))
    elem = [e for e in elem if e.is_displayed()]
    assert_true(step, elem)


@step(r'I should not see an element with tooltip "([^"]*)"')
def no_see_tooltip(step, tooltip):
    """
    Assert that no visible element carries the given tooltip
    (via ``title`` or Bootstrap's ``data-original-title``).
    """
    elem = world.browser.find_elements_by_xpath(str(
        '//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
        dict(tooltip=tooltip)))
    elem = [e for e in elem if e.is_displayed()]
    assert_true(step, not elem)
@step(r'I (?:click|press) the element with tooltip "([^"]*)"')
def press_by_tooltip(step, tooltip):
    """
    Click the first clickable element having the given tooltip.

    Best-effort: candidates that raise on click (hidden, detached, ...)
    are skipped; if none can be clicked the step fails.
    """
    with AssertContextManager(step):
        for button in world.browser.find_elements_by_xpath(str(
                '//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
                dict(tooltip=tooltip))):
            try:
                button.click()
                break
            except Exception:
                # Deliberately broad: try the next matching element.
                pass
        else:
            # for/else: no candidate was successfully clicked.
            raise AssertionError("No button with tooltip '{0}' found"
                                 .format(tooltip))
@step(r'The page title should be "([^"]*)"')
def page_title(step, title):
    """
    Assert that the page title equals *title* exactly.
    """
    with AssertContextManager(step):
        assert_equals(world.browser.title, title)
@step(r'I switch to the frame with id "([^"]*)"')
def switch_to_frame(step, frame):
    """Switch the driver's context into the iframe with the given id."""
    # Renamed the misleading first parameter from ``self`` to ``step`` for
    # consistency with every other step function (lettuce passes it
    # positionally, so behavior is unchanged).
    elem = world.browser.find_element_by_id(frame)
    world.browser.switch_to_frame(elem)
@step(r'I switch back to the main view')
def switch_to_main(step):
    """Switch the driver's context back to the top-level document."""
    # Renamed the misleading first parameter from ``self`` to ``step`` for
    # consistency with every other step function (lettuce passes it
    # positionally, so behavior is unchanged).
    world.browser.switch_to_default_content()
|
bbangert/lettuce_webdriver | lettuce_webdriver/webdriver.py | press_by_tooltip | python | def press_by_tooltip(step, tooltip):
with AssertContextManager(step):
for button in world.browser.find_elements_by_xpath(str(
'//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
dict(tooltip=tooltip))):
try:
button.click()
break
except Exception:
pass
else:
raise AssertionError("No button with tooltip '{0}' found"
.format(tooltip)) | Press a button having a given tooltip. | train | https://github.com/bbangert/lettuce_webdriver/blob/d11f8531c43bb7150c316e0dc4ccd083617becf7/lettuce_webdriver/webdriver.py#L525-L540 | null | """Webdriver support for lettuce"""
from lettuce import step, world
from lettuce_webdriver.util import (assert_true,
assert_false,
AssertContextManager,
find_any_field,
find_button,
find_field,
find_option,
option_in_select,
wait_for)
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import (
NoSuchElementException,
StaleElementReferenceException,
NoAlertPresentException,
WebDriverException)
from nose.tools import assert_equals
# pylint:disable=missing-docstring,redefined-outer-name
from css_selector_steps import *
def contains_content(browser, content):
# Search for an element that contains the whole of the text we're looking
# for in it or its subelements, but whose children do NOT contain that
# text - otherwise matches <body> or <html> or other similarly useless
# things.
for elem in browser.find_elements_by_xpath(unicode(
u'//*[contains(normalize-space(.),"{content}") '
u'and not(./*[contains(normalize-space(.),"{content}")])]'
.format(content=content))):
try:
if elem.is_displayed():
return True
except StaleElementReferenceException:
pass
return False
@wait_for
def wait_for_elem(browser, xpath):
return browser.find_elements_by_xpath(str(xpath))
@wait_for
def wait_for_content(browser, content):
return contains_content(browser, content)
## URLS
@step('I visit "(.*?)"$')
def visit(step, url):
with AssertContextManager(step):
world.browser.get(url)
@step('I go to "(.*?)"$')
def goto(step, url):
step.given('I visit "%s"' % url)
## Links
@step('I click "(.*?)"$')
def click(step, name):
with AssertContextManager(step):
elem = world.browser.find_element_by_link_text(name)
elem.click()
@step('I click by id "(.*?)"$')
def click_by_id(step, id_name):
with AssertContextManager(step):
elem = world.browser.find_element_by_xpath(str('id("%s")' % id_name))
elem.click()
@step('I should see a link with the url "(.*?)"$')
def should_see_link(step, link_url):
assert_true(step, world.browser.
find_element_by_xpath(str('//a[@href="%s"]' % link_url)))
@step('I should see a link to "(.*?)" with the url "(.*?)"$')
def should_see_link_text(step, link_text, link_url):
assert_true(step,
world.browser.find_element_by_xpath(str(
'//a[@href="%s"][./text()="%s"]' %
(link_url, link_text))))
@step('I should see a link that contains the text "(.*?)" '
'and the url "(.*?)"$')
def should_include_link_text(step, link_text, link_url):
return world.browser.find_element_by_xpath(str(
'//a[@href="%s"][contains(., "%s")]' %
(link_url, link_text)))
## General
@step('The element with id of "(.*?)" contains "(.*?)"$')
def element_contains(step, element_id, value):
return world.browser.find_element_by_xpath(str(
'id("{id}")[contains(., "{value}")]'.format(
id=element_id, value=value)))
@step('The element with id of "(.*?)" does not contain "(.*?)"$')
def element_not_contains(step, element_id, value):
elem = world.browser.find_elements_by_xpath(str(
'id("{id}")[contains(., "{value}")]'.format(
id=element_id, value=value)))
assert_false(step, elem)
@wait_for
def wait_for_visible_elem(browser, xpath):
elem = browser.find_elements_by_xpath(str(xpath))
if not elem:
return False
return elem[0].is_displayed()
@step(r'I should see an element with id of "(.*?)" within (\d+) seconds?$')
def should_see_id_in_seconds(step, element_id, timeout):
elem = wait_for_visible_elem(world.browser, 'id("%s")' % element_id,
timeout=int(timeout))
assert_true(step, elem)
@step('I should see an element with id of "(.*?)"$')
def should_see_id(step, element_id):
elem = world.browser.find_element_by_xpath(str('id("%s")' % element_id))
assert_true(step, elem.is_displayed())
@step('I should not see an element with id of "(.*?)"$')
def should_not_see_id(step, element_id):
try:
elem = world.browser.find_element_by_xpath(str('id("%s")' %
element_id))
assert_true(step, not elem.is_displayed())
except NoSuchElementException:
pass
@step(r'I should see "([^"]+)" within (\d+) seconds?$')
def should_see_in_seconds(step, text, timeout):
assert_true(step,
wait_for_content(world.browser, text, timeout=int(timeout)))
@step('I should see "([^"]+)"$')
def should_see(step, text):
assert_true(step, contains_content(world.browser, text))
@step('I see "([^"]+)"$')
def see(step, text):
assert_true(step, contains_content(world.browser, text))
@step('I should not see "([^"]+)"$')
def should_not_see(step, text):
assert_true(step, not contains_content(world.browser, text))
@step('I should be at "(.*?)"$')
def url_should_be(step, url):
assert_true(step, url == world.browser.current_url)
## Browser
@step('The browser\'s URL should be "(.*?)"$')
def browser_url_should_be(step, url):
assert_true(step, url == world.browser.current_url)
@step('The browser\'s URL should contain "(.*?)"$')
def url_should_contain(step, url):
assert_true(step, url in world.browser.current_url)
@step('The browser\'s URL should not contain "(.*?)"$')
def url_should_not_contain(step, url):
assert_true(step, url not in world.browser.current_url)
## Forms
@step('I should see a form that goes to "(.*?)"$')
def see_form(step, url):
return world.browser.find_element_by_xpath(str('//form[@action="%s"]' %
url))
DATE_FIELDS = (
'datetime',
'datetime-local',
'date',
)
TEXT_FIELDS = (
'text',
'textarea',
'password',
'month',
'time',
'week',
'number',
'range',
'email',
'url',
'tel',
'color',
)
@step('I fill in "(.*?)" with "(.*?)"$')
def fill_in_textfield(step, field_name, value):
with AssertContextManager(step):
date_field = find_any_field(world.browser,
DATE_FIELDS,
field_name)
if date_field:
field = date_field
else:
field = find_any_field(world.browser,
TEXT_FIELDS,
field_name)
assert_true(step, field,
'Can not find a field named "%s"' % field_name)
if date_field:
field.send_keys(Keys.DELETE)
else:
field.clear()
field.send_keys(value)
@step('I press "(.*?)"$')
def press_button(step, value):
with AssertContextManager(step):
button = find_button(world.browser, value)
button.click()
@step('I click on label "([^"]*)"')
def click_on_label(step, label):
"""
Click on a label
"""
with AssertContextManager(step):
elem = world.browser.find_element_by_xpath(str(
'//label[normalize-space(text()) = "%s"]' % label))
elem.click()
@step(r'Element with id "([^"]*)" should be focused')
def element_focused(step, id):
"""
Check if the element is focused
"""
elem = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
focused = world.browser.switch_to_active_element()
assert_true(step, elem == focused)
@step(r'Element with id "([^"]*)" should not be focused')
def element_not_focused(step, id):
"""
Check if the element is not focused
"""
elem = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
focused = world.browser.switch_to_active_element()
assert_false(step, elem == focused)
@step(r'Input "([^"]*)" (?:has|should have) value "([^"]*)"')
def input_has_value(step, field_name, value):
"""
Check that the form input element has given value.
"""
with AssertContextManager(step):
text_field = find_any_field(world.browser,
DATE_FIELDS + TEXT_FIELDS,
field_name)
assert_false(step, text_field is False,
'Can not find a field named "%s"' % field_name)
assert_equals(text_field.get_attribute('value'), value)
@step(r'I submit the only form')
def submit_the_only_form(step):
"""
Look for a form on the page and submit it.
"""
form = world.browser.find_element_by_xpath(str('//form'))
form.submit()
@step(r'I submit the form with id "([^"]*)"')
def submit_form_id(step, id):
"""
Submit the form having given id.
"""
form = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
form.submit()
@step(r'I submit the form with action "([^"]*)"')
def submit_form_action(step, url):
"""
Submit the form having given action URL.
"""
form = world.browser.find_element_by_xpath(str('//form[@action="%s"]' %
url))
form.submit()
# Checkboxes
@step('I check "(.*?)"$')
def check_checkbox(step, value):
with AssertContextManager(step):
check_box = find_field(world.browser, 'checkbox', value)
if not check_box.is_selected():
check_box.click()
@step('I uncheck "(.*?)"$')
def uncheck_checkbox(step, value):
with AssertContextManager(step):
check_box = find_field(world.browser, 'checkbox', value)
if check_box.is_selected():
check_box.click()
@step('The "(.*?)" checkbox should be checked$')
def assert_checked_checkbox(step, value):
check_box = find_field(world.browser, 'checkbox', value)
assert_true(step, check_box.is_selected())
@step('The "(.*?)" checkbox should not be checked$')
def assert_not_checked_checkbox(step, value):
check_box = find_field(world.browser, 'checkbox', value)
assert_true(step, not check_box.is_selected())
# Selectors
@step('I select "(.*?)" from "(.*?)"$')
def select_single_item(step, option_name, select_name):
with AssertContextManager(step):
option_box = find_option(world.browser, select_name, option_name)
option_box.click()
@step('I select the following from "([^"]*?)":?$')
def select_multi_items(step, select_name):
with AssertContextManager(step):
# Ensure only the options selected are actually selected
option_names = step.multiline.split('\n')
select_box = find_field(world.browser, 'select', select_name)
select = Select(select_box)
select.deselect_all()
for option in option_names:
try:
select.select_by_value(option)
except NoSuchElementException:
select.select_by_visible_text(option)
@step('The "(.*?)" option from "(.*?)" should be selected$')
def assert_single_selected(step, option_name, select_name):
option_box = find_option(world.browser, select_name, option_name)
assert_true(step, option_box.is_selected())
@step('The following options from "([^"]*?)" should be selected:?$')
def assert_multi_selected(step, select_name):
with AssertContextManager(step):
# Ensure its not selected unless its one of our options
option_names = step.multiline.split('\n')
select_box = find_field(world.browser, 'select', select_name)
option_elems = select_box.find_elements_by_xpath(str('./option'))
for option in option_elems:
if option.get_attribute('id') in option_names or \
option.get_attribute('name') in option_names or \
option.get_attribute('value') in option_names or \
option.text in option_names:
assert_true(step, option.is_selected())
else:
assert_true(step, not option.is_selected())
@step(r'I should see option "([^"]*)" in selector "([^"]*)"')
def select_contains(step, option, id_):
assert_true(step, option_in_select(world.browser, id_, option) is not None)
@step(r'I should not see option "([^"]*)" in selector "([^"]*)"')
def select_does_not_contain(step, option, id_):
assert_true(step, option_in_select(world.browser, id_, option) is None)
## Radios
@step('I choose "(.*?)"$')
def choose_radio(step, value):
with AssertContextManager(step):
box = find_field(world.browser, 'radio', value)
box.click()
@step('The "(.*?)" option should be chosen$')
def assert_radio_selected(step, value):
box = find_field(world.browser, 'radio', value)
assert_true(step, box.is_selected())
@step('The "(.*?)" option should not be chosen$')
def assert_radio_not_selected(step, value):
box = find_field(world.browser, 'radio', value)
assert_true(step, not box.is_selected())
# Alerts
@step('I accept the alert')
def accept_alert(step):
"""
Accept the alert
"""
try:
alert = Alert(world.browser)
alert.accept()
except WebDriverException:
# PhantomJS is kinda poor
pass
@step('I dismiss the alert')
def dismiss_alert(step):
"""
Dismiss the alert
"""
try:
alert = Alert(world.browser)
alert.dismiss()
except WebDriverException:
# PhantomJS is kinda poor
pass
@step(r'I should see an alert with text "([^"]*)"')
def check_alert(step, text):
"""
Check the alert text
"""
try:
alert = Alert(world.browser)
assert_equals(alert.text, text)
except WebDriverException:
# PhantomJS is kinda poor
pass
@step('I should not see an alert')
def check_no_alert(step):
"""
Check there is no alert
"""
try:
alert = Alert(world.browser)
raise AssertionError("Should not see an alert. Alert '%s' shown." %
alert.text)
except NoAlertPresentException:
pass
# Tooltips
@step(r'I should see an element with tooltip "([^"]*)"')
def see_tooltip(step, tooltip):
    """
    Assert that at least one visible element carries the given tooltip
    (``title`` or ``data-original-title``).
    """
    elem = world.browser.find_elements_by_xpath(str(
        '//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
        dict(tooltip=tooltip)))
    elem = [e for e in elem if e.is_displayed()]
    assert_true(step, elem)
@step(r'I should not see an element with tooltip "([^"]*)"')
def no_see_tooltip(step, tooltip):
    """
    Assert that no visible element carries the given tooltip
    (``title`` or ``data-original-title``).
    """
    elem = world.browser.find_elements_by_xpath(str(
        '//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
        dict(tooltip=tooltip)))
    elem = [e for e in elem if e.is_displayed()]
    assert_true(step, not elem)
@step(r'I (?:click|press) the element with tooltip "([^"]*)"')
@step(r'The page title should be "([^"]*)"')
def page_title(step, title):
"""
Check that the page title matches the given one.
"""
with AssertContextManager(step):
assert_equals(world.browser.title, title)
@step(r'I switch to the frame with id "([^"]*)"')
def switch_to_frame(self, frame):
elem = world.browser.find_element_by_id(frame)
world.browser.switch_to_frame(elem)
@step(r'I switch back to the main view')
def switch_to_main(self):
world.browser.switch_to_default_content()
|
bbangert/lettuce_webdriver | lettuce_webdriver/webdriver.py | page_title | python | def page_title(step, title):
with AssertContextManager(step):
assert_equals(world.browser.title, title) | Check that the page title matches the given one. | train | https://github.com/bbangert/lettuce_webdriver/blob/d11f8531c43bb7150c316e0dc4ccd083617becf7/lettuce_webdriver/webdriver.py#L544-L550 | null | """Webdriver support for lettuce"""
from lettuce import step, world
from lettuce_webdriver.util import (assert_true,
assert_false,
AssertContextManager,
find_any_field,
find_button,
find_field,
find_option,
option_in_select,
wait_for)
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import (
NoSuchElementException,
StaleElementReferenceException,
NoAlertPresentException,
WebDriverException)
from nose.tools import assert_equals
# pylint:disable=missing-docstring,redefined-outer-name
from css_selector_steps import *
def contains_content(browser, content):
# Search for an element that contains the whole of the text we're looking
# for in it or its subelements, but whose children do NOT contain that
# text - otherwise matches <body> or <html> or other similarly useless
# things.
for elem in browser.find_elements_by_xpath(unicode(
u'//*[contains(normalize-space(.),"{content}") '
u'and not(./*[contains(normalize-space(.),"{content}")])]'
.format(content=content))):
try:
if elem.is_displayed():
return True
except StaleElementReferenceException:
pass
return False
@wait_for
def wait_for_elem(browser, xpath):
return browser.find_elements_by_xpath(str(xpath))
@wait_for
def wait_for_content(browser, content):
return contains_content(browser, content)
## URLS
@step('I visit "(.*?)"$')
def visit(step, url):
with AssertContextManager(step):
world.browser.get(url)
@step('I go to "(.*?)"$')
def goto(step, url):
step.given('I visit "%s"' % url)
## Links
@step('I click "(.*?)"$')
def click(step, name):
with AssertContextManager(step):
elem = world.browser.find_element_by_link_text(name)
elem.click()
@step('I click by id "(.*?)"$')
def click_by_id(step, id_name):
with AssertContextManager(step):
elem = world.browser.find_element_by_xpath(str('id("%s")' % id_name))
elem.click()
@step('I should see a link with the url "(.*?)"$')
def should_see_link(step, link_url):
assert_true(step, world.browser.
find_element_by_xpath(str('//a[@href="%s"]' % link_url)))
@step('I should see a link to "(.*?)" with the url "(.*?)"$')
def should_see_link_text(step, link_text, link_url):
assert_true(step,
world.browser.find_element_by_xpath(str(
'//a[@href="%s"][./text()="%s"]' %
(link_url, link_text))))
@step('I should see a link that contains the text "(.*?)" '
'and the url "(.*?)"$')
def should_include_link_text(step, link_text, link_url):
return world.browser.find_element_by_xpath(str(
'//a[@href="%s"][contains(., "%s")]' %
(link_url, link_text)))
## General
@step('The element with id of "(.*?)" contains "(.*?)"$')
def element_contains(step, element_id, value):
return world.browser.find_element_by_xpath(str(
'id("{id}")[contains(., "{value}")]'.format(
id=element_id, value=value)))
@step('The element with id of "(.*?)" does not contain "(.*?)"$')
def element_not_contains(step, element_id, value):
elem = world.browser.find_elements_by_xpath(str(
'id("{id}")[contains(., "{value}")]'.format(
id=element_id, value=value)))
assert_false(step, elem)
@wait_for
def wait_for_visible_elem(browser, xpath):
elem = browser.find_elements_by_xpath(str(xpath))
if not elem:
return False
return elem[0].is_displayed()
@step(r'I should see an element with id of "(.*?)" within (\d+) seconds?$')
def should_see_id_in_seconds(step, element_id, timeout):
elem = wait_for_visible_elem(world.browser, 'id("%s")' % element_id,
timeout=int(timeout))
assert_true(step, elem)
@step('I should see an element with id of "(.*?)"$')
def should_see_id(step, element_id):
elem = world.browser.find_element_by_xpath(str('id("%s")' % element_id))
assert_true(step, elem.is_displayed())
@step('I should not see an element with id of "(.*?)"$')
def should_not_see_id(step, element_id):
try:
elem = world.browser.find_element_by_xpath(str('id("%s")' %
element_id))
assert_true(step, not elem.is_displayed())
except NoSuchElementException:
pass
@step(r'I should see "([^"]+)" within (\d+) seconds?$')
def should_see_in_seconds(step, text, timeout):
assert_true(step,
wait_for_content(world.browser, text, timeout=int(timeout)))
@step('I should see "([^"]+)"$')
def should_see(step, text):
assert_true(step, contains_content(world.browser, text))
@step('I see "([^"]+)"$')
def see(step, text):
assert_true(step, contains_content(world.browser, text))
@step('I should not see "([^"]+)"$')
def should_not_see(step, text):
assert_true(step, not contains_content(world.browser, text))
@step('I should be at "(.*?)"$')
def url_should_be(step, url):
assert_true(step, url == world.browser.current_url)
## Browser
@step('The browser\'s URL should be "(.*?)"$')
def browser_url_should_be(step, url):
assert_true(step, url == world.browser.current_url)
@step('The browser\'s URL should contain "(.*?)"$')
def url_should_contain(step, url):
assert_true(step, url in world.browser.current_url)
@step('The browser\'s URL should not contain "(.*?)"$')
def url_should_not_contain(step, url):
assert_true(step, url not in world.browser.current_url)
## Forms
@step('I should see a form that goes to "(.*?)"$')
def see_form(step, url):
return world.browser.find_element_by_xpath(str('//form[@action="%s"]' %
url))
DATE_FIELDS = (
'datetime',
'datetime-local',
'date',
)
TEXT_FIELDS = (
'text',
'textarea',
'password',
'month',
'time',
'week',
'number',
'range',
'email',
'url',
'tel',
'color',
)
@step('I fill in "(.*?)" with "(.*?)"$')
def fill_in_textfield(step, field_name, value):
with AssertContextManager(step):
date_field = find_any_field(world.browser,
DATE_FIELDS,
field_name)
if date_field:
field = date_field
else:
field = find_any_field(world.browser,
TEXT_FIELDS,
field_name)
assert_true(step, field,
'Can not find a field named "%s"' % field_name)
if date_field:
field.send_keys(Keys.DELETE)
else:
field.clear()
field.send_keys(value)
@step('I press "(.*?)"$')
def press_button(step, value):
with AssertContextManager(step):
button = find_button(world.browser, value)
button.click()
@step('I click on label "([^"]*)"')
def click_on_label(step, label):
"""
Click on a label
"""
with AssertContextManager(step):
elem = world.browser.find_element_by_xpath(str(
'//label[normalize-space(text()) = "%s"]' % label))
elem.click()
@step(r'Element with id "([^"]*)" should be focused')
def element_focused(step, id):
"""
Check if the element is focused
"""
elem = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
focused = world.browser.switch_to_active_element()
assert_true(step, elem == focused)
@step(r'Element with id "([^"]*)" should not be focused')
def element_not_focused(step, id):
"""
Check if the element is not focused
"""
elem = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
focused = world.browser.switch_to_active_element()
assert_false(step, elem == focused)
@step(r'Input "([^"]*)" (?:has|should have) value "([^"]*)"')
def input_has_value(step, field_name, value):
"""
Check that the form input element has given value.
"""
with AssertContextManager(step):
text_field = find_any_field(world.browser,
DATE_FIELDS + TEXT_FIELDS,
field_name)
assert_false(step, text_field is False,
'Can not find a field named "%s"' % field_name)
assert_equals(text_field.get_attribute('value'), value)
@step(r'I submit the only form')
def submit_the_only_form(step):
"""
Look for a form on the page and submit it.
"""
form = world.browser.find_element_by_xpath(str('//form'))
form.submit()
@step(r'I submit the form with id "([^"]*)"')
def submit_form_id(step, id):
"""
Submit the form having given id.
"""
form = world.browser.find_element_by_xpath(str('id("{id}")'.format(id=id)))
form.submit()
@step(r'I submit the form with action "([^"]*)"')
def submit_form_action(step, url):
"""
Submit the form having given action URL.
"""
form = world.browser.find_element_by_xpath(str('//form[@action="%s"]' %
url))
form.submit()
# Checkboxes
@step('I check "(.*?)"$')
def check_checkbox(step, value):
with AssertContextManager(step):
check_box = find_field(world.browser, 'checkbox', value)
if not check_box.is_selected():
check_box.click()
@step('I uncheck "(.*?)"$')
def uncheck_checkbox(step, value):
with AssertContextManager(step):
check_box = find_field(world.browser, 'checkbox', value)
if check_box.is_selected():
check_box.click()
@step('The "(.*?)" checkbox should be checked$')
def assert_checked_checkbox(step, value):
check_box = find_field(world.browser, 'checkbox', value)
assert_true(step, check_box.is_selected())
@step('The "(.*?)" checkbox should not be checked$')
def assert_not_checked_checkbox(step, value):
check_box = find_field(world.browser, 'checkbox', value)
assert_true(step, not check_box.is_selected())
# Selectors
@step('I select "(.*?)" from "(.*?)"$')
def select_single_item(step, option_name, select_name):
with AssertContextManager(step):
option_box = find_option(world.browser, select_name, option_name)
option_box.click()
@step('I select the following from "([^"]*?)":?$')
def select_multi_items(step, select_name):
with AssertContextManager(step):
# Ensure only the options selected are actually selected
option_names = step.multiline.split('\n')
select_box = find_field(world.browser, 'select', select_name)
select = Select(select_box)
select.deselect_all()
for option in option_names:
try:
select.select_by_value(option)
except NoSuchElementException:
select.select_by_visible_text(option)
@step('The "(.*?)" option from "(.*?)" should be selected$')
def assert_single_selected(step, option_name, select_name):
option_box = find_option(world.browser, select_name, option_name)
assert_true(step, option_box.is_selected())
@step('The following options from "([^"]*?)" should be selected:?$')
def assert_multi_selected(step, select_name):
with AssertContextManager(step):
# Ensure its not selected unless its one of our options
option_names = step.multiline.split('\n')
select_box = find_field(world.browser, 'select', select_name)
option_elems = select_box.find_elements_by_xpath(str('./option'))
for option in option_elems:
if option.get_attribute('id') in option_names or \
option.get_attribute('name') in option_names or \
option.get_attribute('value') in option_names or \
option.text in option_names:
assert_true(step, option.is_selected())
else:
assert_true(step, not option.is_selected())
@step(r'I should see option "([^"]*)" in selector "([^"]*)"')
def select_contains(step, option, id_):
assert_true(step, option_in_select(world.browser, id_, option) is not None)
@step(r'I should not see option "([^"]*)" in selector "([^"]*)"')
def select_does_not_contain(step, option, id_):
assert_true(step, option_in_select(world.browser, id_, option) is None)
## Radios
@step('I choose "(.*?)"$')
def choose_radio(step, value):
with AssertContextManager(step):
box = find_field(world.browser, 'radio', value)
box.click()
@step('The "(.*?)" option should be chosen$')
def assert_radio_selected(step, value):
box = find_field(world.browser, 'radio', value)
assert_true(step, box.is_selected())
@step('The "(.*?)" option should not be chosen$')
def assert_radio_not_selected(step, value):
box = find_field(world.browser, 'radio', value)
assert_true(step, not box.is_selected())
# Alerts
@step('I accept the alert')
def accept_alert(step):
"""
Accept the alert
"""
try:
alert = Alert(world.browser)
alert.accept()
except WebDriverException:
# PhantomJS is kinda poor
pass
@step('I dismiss the alert')
def dismiss_alert(step):
"""
Dismiss the alert
"""
try:
alert = Alert(world.browser)
alert.dismiss()
except WebDriverException:
# PhantomJS is kinda poor
pass
@step(r'I should see an alert with text "([^"]*)"')
def check_alert(step, text):
"""
Check the alert text
"""
try:
alert = Alert(world.browser)
assert_equals(alert.text, text)
except WebDriverException:
# PhantomJS is kinda poor
pass
@step('I should not see an alert')
def check_no_alert(step):
"""
Check there is no alert
"""
try:
alert = Alert(world.browser)
raise AssertionError("Should not see an alert. Alert '%s' shown." %
alert.text)
except NoAlertPresentException:
pass
# Tooltips
@step(r'I should see an element with tooltip "([^"]*)"')
def see_tooltip(step, tooltip):
"""
Press a button having a given tooltip.
"""
elem = world.browser.find_elements_by_xpath(str(
'//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
dict(tooltip=tooltip)))
elem = [e for e in elem if e.is_displayed()]
assert_true(step, elem)
@step(r'I should not see an element with tooltip "([^"]*)"')
def no_see_tooltip(step, tooltip):
"""
Press a button having a given tooltip.
"""
elem = world.browser.find_elements_by_xpath(str(
'//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
dict(tooltip=tooltip)))
elem = [e for e in elem if e.is_displayed()]
assert_true(step, not elem)
@step(r'I (?:click|press) the element with tooltip "([^"]*)"')
def press_by_tooltip(step, tooltip):
"""
Press a button having a given tooltip.
"""
with AssertContextManager(step):
for button in world.browser.find_elements_by_xpath(str(
'//*[@title="%(tooltip)s" or @data-original-title="%(tooltip)s"]' %
dict(tooltip=tooltip))):
try:
button.click()
break
except Exception:
pass
else:
raise AssertionError("No button with tooltip '{0}' found"
.format(tooltip))
@step(r'The page title should be "([^"]*)"')
@step(r'I switch to the frame with id "([^"]*)"')
def switch_to_frame(self, frame):
elem = world.browser.find_element_by_id(frame)
world.browser.switch_to_frame(elem)
@step(r'I switch back to the main view')
def switch_to_main(self):
world.browser.switch_to_default_content()
|
muckamuck/stackility | stackility/drift.py | DriftTool._init_boto3_clients | python | def _init_boto3_clients(self, profile, region):
try:
session = None
if profile and region:
session = boto3.session.Session(profile_name=profile, region_name=region)
elif profile:
session = boto3.session.Session(profile_name=profile)
elif region:
session = boto3.session.Session(region_name=region)
else:
session = boto3.session.Session()
self._cloud_formation = session.client('cloudformation')
return True
except Exception as wtf:
logging.error(wtf, exc_info=True)
return False | The utililty requires boto3 clients to CloudFormation.
Args:
None
Returns:
Good or Bad; True or False | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/drift.py#L54-L79 | null | class DriftTool(object):
'''
Utility to find drift in CloudFormation stacks.
'''
def __init__(self, **kwargs):
"""
The initializer sets up stuff to do the work
Args:
dict of args
Returns:
kwarg[Profile]: asdasdf
Raises:
SystemError if thing are not all good
"""
try:
self.nap_time = int(os.environ.get('CSU_POLL_INTERVAL', 30))
except Exception:
self.nap_time = 15
self._stack_name = kwargs.get('Stack')
self._verbose = kwargs.get('Verbose', False)
if not self._stack_name:
logging.error('no stack name given, exiting')
raise SystemError
if not self._init_boto3_clients(kwargs.get('Profile'), kwargs.get('Region')):
logging.error('client initialization failed, exiting')
raise SystemError
def determine_drift(self):
"""
Determine the drift of the stack.
Args:
None
Returns:
Good or Bad; True or False
"""
try:
response = self._cloud_formation.detect_stack_drift(StackName=self._stack_name)
drift_request_id = response.get('StackDriftDetectionId', None)
if drift_request_id:
logging.info('drift_request_id: %s - polling', drift_request_id)
drift_calc_done = False
while not drift_calc_done:
time.sleep(self.nap_time)
response = self._cloud_formation.describe_stack_drift_detection_status(
StackDriftDetectionId=drift_request_id
)
current_state = response.get('DetectionStatus', None)
logging.info(
'describe_stack_drift_detection_status(): {}'.format(current_state)
)
drift_calc_done = current_state in CALC_DONE_STATES
drift_answer = response.get('StackDriftStatus', 'UNKNOWN')
logging.info('drift of {}: {}'.format(
self._stack_name,
drift_answer
))
if drift_answer == 'DRIFTED':
if self._verbose:
self._print_drift_report()
return False
else:
return True
else:
logging.warning('drift_request_id is None')
return False
except Exception as wtf:
logging.error(wtf, exc_info=True)
return False
def _print_drift_report(self):
"""
Report the drift of the stack.
Args:
None
Returns:
Good or Bad; True or False
Note: not yet implemented
"""
try:
response = self._cloud_formation.describe_stack_resources(StackName=self._stack_name)
rows = []
for resource in response.get('StackResources', []):
row = []
row.append(resource.get('LogicalResourceId', 'unknown'))
row.append(resource.get('PhysicalResourceId', 'unknown'))
row.append(resource.get('ResourceStatus', 'unknown'))
row.append(resource.get('DriftInformation', {}).get('StackResourceDriftStatus', 'unknown'))
rows.append(row)
print('Drift Report:')
print(tabulate(rows, headers=[
'Logical ID',
'Physical ID',
'Resource Status',
'Drift Info'
]))
except Exception as wtf:
logging.error(wtf, exc_info=True)
return False
return True
|
muckamuck/stackility | stackility/drift.py | DriftTool.determine_drift | python | def determine_drift(self):
try:
response = self._cloud_formation.detect_stack_drift(StackName=self._stack_name)
drift_request_id = response.get('StackDriftDetectionId', None)
if drift_request_id:
logging.info('drift_request_id: %s - polling', drift_request_id)
drift_calc_done = False
while not drift_calc_done:
time.sleep(self.nap_time)
response = self._cloud_formation.describe_stack_drift_detection_status(
StackDriftDetectionId=drift_request_id
)
current_state = response.get('DetectionStatus', None)
logging.info(
'describe_stack_drift_detection_status(): {}'.format(current_state)
)
drift_calc_done = current_state in CALC_DONE_STATES
drift_answer = response.get('StackDriftStatus', 'UNKNOWN')
logging.info('drift of {}: {}'.format(
self._stack_name,
drift_answer
))
if drift_answer == 'DRIFTED':
if self._verbose:
self._print_drift_report()
return False
else:
return True
else:
logging.warning('drift_request_id is None')
return False
except Exception as wtf:
logging.error(wtf, exc_info=True)
return False | Determine the drift of the stack.
Args:
None
Returns:
Good or Bad; True or False | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/drift.py#L81-L126 | [
"def _print_drift_report(self):\n \"\"\"\n Report the drift of the stack.\n\n Args:\n None\n\n Returns:\n Good or Bad; True or False\n\n Note: not yet implemented\n \"\"\"\n try:\n response = self._cloud_formation.describe_stack_resources(StackName=self._stack_name)\n ... | class DriftTool(object):
'''
Utility to find drift in CloudFormation stacks.
'''
def __init__(self, **kwargs):
"""
The initializer sets up stuff to do the work
Args:
dict of args
Returns:
kwarg[Profile]: asdasdf
Raises:
SystemError if thing are not all good
"""
try:
self.nap_time = int(os.environ.get('CSU_POLL_INTERVAL', 30))
except Exception:
self.nap_time = 15
self._stack_name = kwargs.get('Stack')
self._verbose = kwargs.get('Verbose', False)
if not self._stack_name:
logging.error('no stack name given, exiting')
raise SystemError
if not self._init_boto3_clients(kwargs.get('Profile'), kwargs.get('Region')):
logging.error('client initialization failed, exiting')
raise SystemError
def _init_boto3_clients(self, profile, region):
"""
The utililty requires boto3 clients to CloudFormation.
Args:
None
Returns:
Good or Bad; True or False
"""
try:
session = None
if profile and region:
session = boto3.session.Session(profile_name=profile, region_name=region)
elif profile:
session = boto3.session.Session(profile_name=profile)
elif region:
session = boto3.session.Session(region_name=region)
else:
session = boto3.session.Session()
self._cloud_formation = session.client('cloudformation')
return True
except Exception as wtf:
logging.error(wtf, exc_info=True)
return False
def _print_drift_report(self):
"""
Report the drift of the stack.
Args:
None
Returns:
Good or Bad; True or False
Note: not yet implemented
"""
try:
response = self._cloud_formation.describe_stack_resources(StackName=self._stack_name)
rows = []
for resource in response.get('StackResources', []):
row = []
row.append(resource.get('LogicalResourceId', 'unknown'))
row.append(resource.get('PhysicalResourceId', 'unknown'))
row.append(resource.get('ResourceStatus', 'unknown'))
row.append(resource.get('DriftInformation', {}).get('StackResourceDriftStatus', 'unknown'))
rows.append(row)
print('Drift Report:')
print(tabulate(rows, headers=[
'Logical ID',
'Physical ID',
'Resource Status',
'Drift Info'
]))
except Exception as wtf:
logging.error(wtf, exc_info=True)
return False
return True
|
muckamuck/stackility | stackility/drift.py | DriftTool._print_drift_report | python | def _print_drift_report(self):
try:
response = self._cloud_formation.describe_stack_resources(StackName=self._stack_name)
rows = []
for resource in response.get('StackResources', []):
row = []
row.append(resource.get('LogicalResourceId', 'unknown'))
row.append(resource.get('PhysicalResourceId', 'unknown'))
row.append(resource.get('ResourceStatus', 'unknown'))
row.append(resource.get('DriftInformation', {}).get('StackResourceDriftStatus', 'unknown'))
rows.append(row)
print('Drift Report:')
print(tabulate(rows, headers=[
'Logical ID',
'Physical ID',
'Resource Status',
'Drift Info'
]))
except Exception as wtf:
logging.error(wtf, exc_info=True)
return False
return True | Report the drift of the stack.
Args:
None
Returns:
Good or Bad; True or False
Note: not yet implemented | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/drift.py#L128-L162 | null | class DriftTool(object):
'''
Utility to find drift in CloudFormation stacks.
'''
def __init__(self, **kwargs):
"""
The initializer sets up stuff to do the work
Args:
dict of args
Returns:
kwarg[Profile]: asdasdf
Raises:
SystemError if thing are not all good
"""
try:
self.nap_time = int(os.environ.get('CSU_POLL_INTERVAL', 30))
except Exception:
self.nap_time = 15
self._stack_name = kwargs.get('Stack')
self._verbose = kwargs.get('Verbose', False)
if not self._stack_name:
logging.error('no stack name given, exiting')
raise SystemError
if not self._init_boto3_clients(kwargs.get('Profile'), kwargs.get('Region')):
logging.error('client initialization failed, exiting')
raise SystemError
def _init_boto3_clients(self, profile, region):
"""
The utililty requires boto3 clients to CloudFormation.
Args:
None
Returns:
Good or Bad; True or False
"""
try:
session = None
if profile and region:
session = boto3.session.Session(profile_name=profile, region_name=region)
elif profile:
session = boto3.session.Session(profile_name=profile)
elif region:
session = boto3.session.Session(region_name=region)
else:
session = boto3.session.Session()
self._cloud_formation = session.client('cloudformation')
return True
except Exception as wtf:
logging.error(wtf, exc_info=True)
return False
def determine_drift(self):
"""
Determine the drift of the stack.
Args:
None
Returns:
Good or Bad; True or False
"""
try:
response = self._cloud_formation.detect_stack_drift(StackName=self._stack_name)
drift_request_id = response.get('StackDriftDetectionId', None)
if drift_request_id:
logging.info('drift_request_id: %s - polling', drift_request_id)
drift_calc_done = False
while not drift_calc_done:
time.sleep(self.nap_time)
response = self._cloud_formation.describe_stack_drift_detection_status(
StackDriftDetectionId=drift_request_id
)
current_state = response.get('DetectionStatus', None)
logging.info(
'describe_stack_drift_detection_status(): {}'.format(current_state)
)
drift_calc_done = current_state in CALC_DONE_STATES
drift_answer = response.get('StackDriftStatus', 'UNKNOWN')
logging.info('drift of {}: {}'.format(
self._stack_name,
drift_answer
))
if drift_answer == 'DRIFTED':
if self._verbose:
self._print_drift_report()
return False
else:
return True
else:
logging.warning('drift_request_id is None')
return False
except Exception as wtf:
logging.error(wtf, exc_info=True)
return False
|
muckamuck/stackility | stackility/command.py | upsert | python | def upsert(version, stack, ini, dryrun, yaml, no_poll, work_directory):
ini_data = read_config_info(ini)
if 'environment' not in ini_data:
print('[environment] section is required in the INI file')
sys.exit(1)
if version:
ini_data['codeVersion'] = version
else:
ini_data['codeVersion'] = str(int(time.time()))
if 'region' not in ini_data['environment']:
ini_data['environment']['region'] = find_myself()
ini_data['yaml'] = bool(yaml)
ini_data['no_poll'] = bool(no_poll)
ini_data['dryrun'] = bool(dryrun)
if stack:
ini_data['environment']['stack_name'] = stack
if work_directory:
try:
os.chdir(work_directory)
except Exception as wtf:
logging.error(wtf)
sys.exit(2)
print(json.dumps(ini_data, indent=2))
start_upsert(ini_data) | The main reason we have arrived here. This is the entry-point for the
utility to create/update a CloudFormation stack. | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/command.py#L41-L74 | [
"def read_config_info(ini_file):\n \"\"\"\n Read the INI file\n\n Args:\n ini_file - path to the file\n\n Returns:\n A dictionary of stuff from the INI file\n\n Exits:\n 1 - if problems are encountered\n \"\"\"\n try:\n config = RawConfigParser()\n config.opti... | """
The command line interface to stackility.
Major help from: https://www.youtube.com/watch?v=kNke39OZ2k0
"""
from configparser import RawConfigParser
import time
import json
import logging
import sys
import os
import traceback
import boto3
import click
from stackility import CloudStackUtility
from stackility import StackTool
from stackility import DriftTool
@click.group()
@click.version_option(version='0.7.2')
def cli():
"""
A utility for creating, updating, listing and deleting AWS CloudFormation stacks.
"""
pass
@cli.command()
@click.option('--version', '-v', help='code version')
@click.option('--stack', '-s', help='stack name')
@click.option('--ini', '-i', help='INI file with needed information', required=True)
@click.option('--dryrun', '-d', help='dry run, generate a change set report', is_flag=True)
@click.option(
'--yaml', '-y',
help='YAML template (deprecated - YAMLness is now detected at run-time)',
is_flag=True
)
@click.option('--no-poll', help='Start the stack work but do not poll', is_flag=True)
@click.option('--work-directory', '-w', help='Start in the given working directory')
@cli.command()
@click.option('-s', '--stack', required=True)
@click.option('-r', '--region')
@click.option('-f', '--profile')
def delete(stack, region, profile):
"""
Delete the given CloudFormation stack.
"""
ini_data = {}
environment = {}
environment['stack_name'] = stack
if region:
environment['region'] = region
else:
environment['region'] = find_myself()
if profile:
environment['profile'] = profile
ini_data['environment'] = environment
if start_smash(ini_data):
sys.exit(0)
else:
sys.exit(1)
@cli.command()
@click.option('-r', '--region')
@click.option('-f', '--profile')
def list(region, profile):
"""
List all the CloudFormation stacks in the given region.
"""
ini_data = {}
environment = {}
if region:
environment['region'] = region
else:
environment['region'] = find_myself()
if profile:
environment['profile'] = profile
ini_data['environment'] = environment
if start_list(ini_data):
sys.exit(0)
else:
sys.exit(1)
@cli.command()
@click.option('--stack', '-s', help='stack name', required=True)
@click.option('-r', '--region', help='region where the stack lives')
@click.option('-f', '--profile', help='AWS profile to access resources')
def drift(stack, region, profile):
"""
Produce a CloudFormation drift report for the given stack.
"""
logging.debug('finding drift - stack: {}'.format(stack))
logging.debug('region: {}'.format(region))
logging.debug('profile: {}'.format(profile))
tool = DriftTool(
Stack=stack,
Region=region,
Profile=profile,
Verbose=True
)
if tool.determine_drift():
sys.exit(0)
else:
sys.exit(1)
def start_upsert(ini_data):
"""
Helper function to facilitate upsert.
Args:
ini_date - the dictionary of info to run upsert
Exit:
0 - good
1 - bad
"""
stack_driver = CloudStackUtility(ini_data)
poll_stack = not ini_data.get('no_poll', False)
if stack_driver.upsert():
logging.info('stack create/update was started successfully.')
if poll_stack:
stack_tool = None
try:
profile = ini_data.get('environment', {}).get('profile')
if profile:
boto3_session = boto3.session.Session(profile_name=profile)
else:
boto3_session = boto3.session.Session()
region = ini_data['environment']['region']
stack_name = ini_data['environment']['stack_name']
cf_client = stack_driver.get_cloud_formation_client()
if not cf_client:
cf_client = boto3_session.client('cloudformation', region_name=region)
stack_tool = stack_tool = StackTool(
stack_name,
region,
cf_client
)
except Exception as wtf:
logging.warning('there was a problems creating stack tool: {}'.format(wtf))
if stack_driver.poll_stack():
try:
logging.info('stack create/update was finished successfully.')
stack_tool.print_stack_info()
except Exception as wtf:
logging.warning('there was a problems printing stack info: {}'.format(wtf))
sys.exit(0)
else:
try:
logging.error('stack create/update was did not go well.')
stack_tool.print_stack_events()
except Exception as wtf:
logging.warning('there was a problems printing stack events: {}'.format(wtf))
sys.exit(1)
else:
logging.error('start of stack create/update did not go well.')
sys.exit(1)
def start_list(command_line):
"""
Facilitate the listing of a CloudFormation stacks
Args:
command_line - a dictionary to of info to inform the operation
Returns:
True if happy else False
"""
stack_driver = CloudStackUtility(command_line)
return stack_driver.list()
def start_smash(command_line):
"""
Facilitate the smashing of a CloudFormation stack
Args:
command_line - a dictionary to of info to inform the operation
Returns:
True if happy else False
"""
stack_driver = CloudStackUtility(command_line)
return stack_driver.smash()
def find_myself():
"""
Find myself
Args:
None
Returns:
An Amazon region
"""
s = boto3.session.Session()
return s.region_name
def read_config_info(ini_file):
"""
Read the INI file
Args:
ini_file - path to the file
Returns:
A dictionary of stuff from the INI file
Exits:
1 - if problems are encountered
"""
try:
config = RawConfigParser()
config.optionxform = lambda option: option
config.read(ini_file)
the_stuff = {}
for section in config.sections():
the_stuff[section] = {}
for option in config.options(section):
the_stuff[section][option] = config.get(section, option)
return the_stuff
except Exception as wtf:
logging.error('Exception caught in read_config_info(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return sys.exit(1)
|
muckamuck/stackility | stackility/command.py | delete | python | def delete(stack, region, profile):
ini_data = {}
environment = {}
environment['stack_name'] = stack
if region:
environment['region'] = region
else:
environment['region'] = find_myself()
if profile:
environment['profile'] = profile
ini_data['environment'] = environment
if start_smash(ini_data):
sys.exit(0)
else:
sys.exit(1) | Delete the given CloudFormation stack. | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/command.py#L81-L102 | [
"def find_myself():\n \"\"\"\n Find myself\n\n Args:\n None\n\n Returns:\n An Amazon region\n \"\"\"\n s = boto3.session.Session()\n return s.region_name\n",
"def start_smash(command_line):\n \"\"\"\n Facilitate the smashing of a CloudFormation stack\n\n Args:\n c... | """
The command line interface to stackility.
Major help from: https://www.youtube.com/watch?v=kNke39OZ2k0
"""
from configparser import RawConfigParser
import time
import json
import logging
import sys
import os
import traceback
import boto3
import click
from stackility import CloudStackUtility
from stackility import StackTool
from stackility import DriftTool
@click.group()
@click.version_option(version='0.7.2')
def cli():
"""
A utility for creating, updating, listing and deleting AWS CloudFormation stacks.
"""
pass
@cli.command()
@click.option('--version', '-v', help='code version')
@click.option('--stack', '-s', help='stack name')
@click.option('--ini', '-i', help='INI file with needed information', required=True)
@click.option('--dryrun', '-d', help='dry run, generate a change set report', is_flag=True)
@click.option(
'--yaml', '-y',
help='YAML template (deprecated - YAMLness is now detected at run-time)',
is_flag=True
)
@click.option('--no-poll', help='Start the stack work but do not poll', is_flag=True)
@click.option('--work-directory', '-w', help='Start in the given working directory')
def upsert(version, stack, ini, dryrun, yaml, no_poll, work_directory):
"""
The main reason we have arrived here. This is the entry-point for the
utility to create/update a CloudFormation stack.
"""
ini_data = read_config_info(ini)
if 'environment' not in ini_data:
print('[environment] section is required in the INI file')
sys.exit(1)
if version:
ini_data['codeVersion'] = version
else:
ini_data['codeVersion'] = str(int(time.time()))
if 'region' not in ini_data['environment']:
ini_data['environment']['region'] = find_myself()
ini_data['yaml'] = bool(yaml)
ini_data['no_poll'] = bool(no_poll)
ini_data['dryrun'] = bool(dryrun)
if stack:
ini_data['environment']['stack_name'] = stack
if work_directory:
try:
os.chdir(work_directory)
except Exception as wtf:
logging.error(wtf)
sys.exit(2)
print(json.dumps(ini_data, indent=2))
start_upsert(ini_data)
@cli.command()
@click.option('-s', '--stack', required=True)
@click.option('-r', '--region')
@click.option('-f', '--profile')
@cli.command()
@click.option('-r', '--region')
@click.option('-f', '--profile')
def list(region, profile):
"""
List all the CloudFormation stacks in the given region.
"""
ini_data = {}
environment = {}
if region:
environment['region'] = region
else:
environment['region'] = find_myself()
if profile:
environment['profile'] = profile
ini_data['environment'] = environment
if start_list(ini_data):
sys.exit(0)
else:
sys.exit(1)
@cli.command()
@click.option('--stack', '-s', help='stack name', required=True)
@click.option('-r', '--region', help='region where the stack lives')
@click.option('-f', '--profile', help='AWS profile to access resources')
def drift(stack, region, profile):
"""
Produce a CloudFormation drift report for the given stack.
"""
logging.debug('finding drift - stack: {}'.format(stack))
logging.debug('region: {}'.format(region))
logging.debug('profile: {}'.format(profile))
tool = DriftTool(
Stack=stack,
Region=region,
Profile=profile,
Verbose=True
)
if tool.determine_drift():
sys.exit(0)
else:
sys.exit(1)
def start_upsert(ini_data):
"""
Helper function to facilitate upsert.
Args:
ini_date - the dictionary of info to run upsert
Exit:
0 - good
1 - bad
"""
stack_driver = CloudStackUtility(ini_data)
poll_stack = not ini_data.get('no_poll', False)
if stack_driver.upsert():
logging.info('stack create/update was started successfully.')
if poll_stack:
stack_tool = None
try:
profile = ini_data.get('environment', {}).get('profile')
if profile:
boto3_session = boto3.session.Session(profile_name=profile)
else:
boto3_session = boto3.session.Session()
region = ini_data['environment']['region']
stack_name = ini_data['environment']['stack_name']
cf_client = stack_driver.get_cloud_formation_client()
if not cf_client:
cf_client = boto3_session.client('cloudformation', region_name=region)
stack_tool = stack_tool = StackTool(
stack_name,
region,
cf_client
)
except Exception as wtf:
logging.warning('there was a problems creating stack tool: {}'.format(wtf))
if stack_driver.poll_stack():
try:
logging.info('stack create/update was finished successfully.')
stack_tool.print_stack_info()
except Exception as wtf:
logging.warning('there was a problems printing stack info: {}'.format(wtf))
sys.exit(0)
else:
try:
logging.error('stack create/update was did not go well.')
stack_tool.print_stack_events()
except Exception as wtf:
logging.warning('there was a problems printing stack events: {}'.format(wtf))
sys.exit(1)
else:
logging.error('start of stack create/update did not go well.')
sys.exit(1)
def start_list(command_line):
"""
Facilitate the listing of a CloudFormation stacks
Args:
command_line - a dictionary to of info to inform the operation
Returns:
True if happy else False
"""
stack_driver = CloudStackUtility(command_line)
return stack_driver.list()
def start_smash(command_line):
"""
Facilitate the smashing of a CloudFormation stack
Args:
command_line - a dictionary to of info to inform the operation
Returns:
True if happy else False
"""
stack_driver = CloudStackUtility(command_line)
return stack_driver.smash()
def find_myself():
"""
Find myself
Args:
None
Returns:
An Amazon region
"""
s = boto3.session.Session()
return s.region_name
def read_config_info(ini_file):
"""
Read the INI file
Args:
ini_file - path to the file
Returns:
A dictionary of stuff from the INI file
Exits:
1 - if problems are encountered
"""
try:
config = RawConfigParser()
config.optionxform = lambda option: option
config.read(ini_file)
the_stuff = {}
for section in config.sections():
the_stuff[section] = {}
for option in config.options(section):
the_stuff[section][option] = config.get(section, option)
return the_stuff
except Exception as wtf:
logging.error('Exception caught in read_config_info(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return sys.exit(1)
|
muckamuck/stackility | stackility/command.py | list | python | def list(region, profile):
ini_data = {}
environment = {}
if region:
environment['region'] = region
else:
environment['region'] = find_myself()
if profile:
environment['profile'] = profile
ini_data['environment'] = environment
if start_list(ini_data):
sys.exit(0)
else:
sys.exit(1) | List all the CloudFormation stacks in the given region. | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/command.py#L108-L127 | [
"def find_myself():\n \"\"\"\n Find myself\n\n Args:\n None\n\n Returns:\n An Amazon region\n \"\"\"\n s = boto3.session.Session()\n return s.region_name\n",
"def start_list(command_line):\n \"\"\"\n Facilitate the listing of a CloudFormation stacks\n\n Args:\n co... | """
The command line interface to stackility.
Major help from: https://www.youtube.com/watch?v=kNke39OZ2k0
"""
from configparser import RawConfigParser
import time
import json
import logging
import sys
import os
import traceback
import boto3
import click
from stackility import CloudStackUtility
from stackility import StackTool
from stackility import DriftTool
@click.group()
@click.version_option(version='0.7.2')
def cli():
"""
A utility for creating, updating, listing and deleting AWS CloudFormation stacks.
"""
pass
@cli.command()
@click.option('--version', '-v', help='code version')
@click.option('--stack', '-s', help='stack name')
@click.option('--ini', '-i', help='INI file with needed information', required=True)
@click.option('--dryrun', '-d', help='dry run, generate a change set report', is_flag=True)
@click.option(
    '--yaml', '-y',
    help='YAML template (deprecated - YAMLness is now detected at run-time)',
    is_flag=True
)
@click.option('--no-poll', help='Start the stack work but do not poll', is_flag=True)
@click.option('--work-directory', '-w', help='Start in the given working directory')
def upsert(version, stack, ini, dryrun, yaml, no_poll, work_directory):
    """
    The main reason we have arrived here. This is the entry-point for the
    utility to create/update a CloudFormation stack.
    """
    ini_data = read_config_info(ini)
    if 'environment' not in ini_data:
        print('[environment] section is required in the INI file')
        sys.exit(1)

    # code version defaults to the current epoch second when not supplied
    ini_data['codeVersion'] = version if version else str(int(time.time()))

    if 'region' not in ini_data['environment']:
        ini_data['environment']['region'] = find_myself()

    # normalize the click flags onto the config dictionary
    for flag_name, flag_value in (('yaml', yaml),
                                  ('no_poll', no_poll),
                                  ('dryrun', dryrun)):
        ini_data[flag_name] = bool(flag_value)

    if stack:
        ini_data['environment']['stack_name'] = stack

    if work_directory:
        try:
            os.chdir(work_directory)
        except Exception as wtf:
            logging.error(wtf)
            sys.exit(2)

    print(json.dumps(ini_data, indent=2))
    start_upsert(ini_data)
@cli.command()
@click.option('-s', '--stack', required=True)
@click.option('-r', '--region')
@click.option('-f', '--profile')
def delete(stack, region, profile):
    """
    Delete the given CloudFormation stack.
    """
    environment = {
        'stack_name': stack,
        'region': region if region else find_myself(),
    }
    if profile:
        environment['profile'] = profile

    # exit 0 on a successful smash, 1 otherwise
    sys.exit(0 if start_smash({'environment': environment}) else 1)
@cli.command()
@click.option('-r', '--region')
@click.option('-f', '--profile')
@cli.command()
@click.option('--stack', '-s', help='stack name', required=True)
@click.option('-r', '--region', help='region where the stack lives')
@click.option('-f', '--profile', help='AWS profile to access resources')
def drift(stack, region, profile):
    """
    Produce a CloudFormation drift report for the given stack.
    """
    # emit the same debug lines as before, just table-driven
    for label, value in (('finding drift - stack', stack),
                         ('region', region),
                         ('profile', profile)):
        logging.debug('{}: {}'.format(label, value))

    tool = DriftTool(
        Stack=stack,
        Region=region,
        Profile=profile,
        Verbose=True
    )

    sys.exit(0 if tool.determine_drift() else 1)
def start_upsert(ini_data):
    """
    Helper function to facilitate upsert.

    Args:
        ini_data - the dictionary of info to run upsert

    Exit:
        0 - good
        1 - bad
    """
    stack_driver = CloudStackUtility(ini_data)
    poll_stack = not ini_data.get('no_poll', False)

    # guard clause: bail out immediately if the create/update did not start
    if not stack_driver.upsert():
        logging.error('start of stack create/update did not go well.')
        sys.exit(1)

    logging.info('stack create/update was started successfully.')
    if not poll_stack:
        # caller asked for fire-and-forget; nothing left to do
        return

    stack_tool = None
    try:
        profile = ini_data.get('environment', {}).get('profile')
        if profile:
            boto3_session = boto3.session.Session(profile_name=profile)
        else:
            boto3_session = boto3.session.Session()

        region = ini_data['environment']['region']
        stack_name = ini_data['environment']['stack_name']

        cf_client = stack_driver.get_cloud_formation_client()
        if not cf_client:
            cf_client = boto3_session.client('cloudformation', region_name=region)

        # fixed: the original had a doubled "stack_tool = stack_tool =" assignment
        stack_tool = StackTool(stack_name, region, cf_client)
    except Exception as wtf:
        logging.warning('there was a problem creating stack tool: {}'.format(wtf))

    if stack_driver.poll_stack():
        try:
            logging.info('stack create/update was finished successfully.')
            # stack_tool may be None if construction failed above; the
            # resulting AttributeError is swallowed by this except, as before
            stack_tool.print_stack_info()
        except Exception as wtf:
            logging.warning('there was a problem printing stack info: {}'.format(wtf))
        sys.exit(0)
    else:
        try:
            logging.error('stack create/update did not go well.')
            stack_tool.print_stack_events()
        except Exception as wtf:
            logging.warning('there was a problem printing stack events: {}'.format(wtf))
        sys.exit(1)
def start_list(command_line):
    """
    Facilitate the listing of CloudFormation stacks.

    Args:
        command_line - a dictionary of info to inform the operation

    Returns:
        True if happy else False
    """
    return CloudStackUtility(command_line).list()
def start_smash(command_line):
    """
    Facilitate the smashing (deletion) of a CloudFormation stack.

    Args:
        command_line - a dictionary of info to inform the operation

    Returns:
        True if happy else False
    """
    return CloudStackUtility(command_line).smash()
def find_myself():
    """
    Determine the AWS region of the current boto3 session.

    Returns:
        An Amazon region name (string)
    """
    return boto3.session.Session().region_name
def read_config_info(ini_file):
    """
    Read the INI file.

    Args:
        ini_file - path to the file

    Returns:
        A dictionary of stuff from the INI file

    Exits:
        1 - if problems are encountered
    """
    try:
        config = RawConfigParser()
        # preserve the case of option names (the default optionxform lower-cases them)
        config.optionxform = lambda option: option
        config.read(ini_file)
        return {
            section: {
                option: config.get(section, option)
                for option in config.options(section)
            }
            for section in config.sections()
        }
    except Exception as wtf:
        logging.error('Exception caught in read_config_info(): {}'.format(wtf))
        traceback.print_exc(file=sys.stdout)
        # fixed: the original wrote "return sys.exit(1)" - sys.exit raises
        # SystemExit, so the return was dead code
        sys.exit(1)
|
muckamuck/stackility | stackility/command.py | drift | python | def drift(stack, region, profile):
logging.debug('finding drift - stack: {}'.format(stack))
logging.debug('region: {}'.format(region))
logging.debug('profile: {}'.format(profile))
tool = DriftTool(
Stack=stack,
Region=region,
Profile=profile,
Verbose=True
)
if tool.determine_drift():
sys.exit(0)
else:
sys.exit(1) | Produce a CloudFormation drift report for the given stack. | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/command.py#L134-L151 | [
"def determine_drift(self):\n \"\"\"\n Determine the drift of the stack.\n\n Args:\n None\n\n Returns:\n Good or Bad; True or False\n \"\"\"\n try:\n response = self._cloud_formation.detect_stack_drift(StackName=self._stack_name)\n drift_request_id = response.get('Stack... | """
The command line interface to stackility.
Major help from: https://www.youtube.com/watch?v=kNke39OZ2k0
"""
from configparser import RawConfigParser
import time
import json
import logging
import sys
import os
import traceback
import boto3
import click
from stackility import CloudStackUtility
from stackility import StackTool
from stackility import DriftTool
@click.group()
@click.version_option(version='0.7.2')
def cli():
"""
A utility for creating, updating, listing and deleting AWS CloudFormation stacks.
"""
pass
@cli.command()
@click.option('--version', '-v', help='code version')
@click.option('--stack', '-s', help='stack name')
@click.option('--ini', '-i', help='INI file with needed information', required=True)
@click.option('--dryrun', '-d', help='dry run, generate a change set report', is_flag=True)
@click.option(
'--yaml', '-y',
help='YAML template (deprecated - YAMLness is now detected at run-time)',
is_flag=True
)
@click.option('--no-poll', help='Start the stack work but do not poll', is_flag=True)
@click.option('--work-directory', '-w', help='Start in the given working directory')
def upsert(version, stack, ini, dryrun, yaml, no_poll, work_directory):
"""
The main reason we have arrived here. This is the entry-point for the
utility to create/update a CloudFormation stack.
"""
ini_data = read_config_info(ini)
if 'environment' not in ini_data:
print('[environment] section is required in the INI file')
sys.exit(1)
if version:
ini_data['codeVersion'] = version
else:
ini_data['codeVersion'] = str(int(time.time()))
if 'region' not in ini_data['environment']:
ini_data['environment']['region'] = find_myself()
ini_data['yaml'] = bool(yaml)
ini_data['no_poll'] = bool(no_poll)
ini_data['dryrun'] = bool(dryrun)
if stack:
ini_data['environment']['stack_name'] = stack
if work_directory:
try:
os.chdir(work_directory)
except Exception as wtf:
logging.error(wtf)
sys.exit(2)
print(json.dumps(ini_data, indent=2))
start_upsert(ini_data)
@cli.command()
@click.option('-s', '--stack', required=True)
@click.option('-r', '--region')
@click.option('-f', '--profile')
def delete(stack, region, profile):
"""
Delete the given CloudFormation stack.
"""
ini_data = {}
environment = {}
environment['stack_name'] = stack
if region:
environment['region'] = region
else:
environment['region'] = find_myself()
if profile:
environment['profile'] = profile
ini_data['environment'] = environment
if start_smash(ini_data):
sys.exit(0)
else:
sys.exit(1)
@cli.command()
@click.option('-r', '--region')
@click.option('-f', '--profile')
def list(region, profile):
    """
    List all the CloudFormation stacks in the given region.
    """
    environment = {'region': region if region else find_myself()}
    if profile:
        environment['profile'] = profile

    # exit 0 on a successful listing, 1 otherwise
    sys.exit(0 if start_list({'environment': environment}) else 1)
@cli.command()
@click.option('--stack', '-s', help='stack name', required=True)
@click.option('-r', '--region', help='region where the stack lives')
@click.option('-f', '--profile', help='AWS profile to access resources')
def start_upsert(ini_data):
"""
Helper function to facilitate upsert.
Args:
ini_date - the dictionary of info to run upsert
Exit:
0 - good
1 - bad
"""
stack_driver = CloudStackUtility(ini_data)
poll_stack = not ini_data.get('no_poll', False)
if stack_driver.upsert():
logging.info('stack create/update was started successfully.')
if poll_stack:
stack_tool = None
try:
profile = ini_data.get('environment', {}).get('profile')
if profile:
boto3_session = boto3.session.Session(profile_name=profile)
else:
boto3_session = boto3.session.Session()
region = ini_data['environment']['region']
stack_name = ini_data['environment']['stack_name']
cf_client = stack_driver.get_cloud_formation_client()
if not cf_client:
cf_client = boto3_session.client('cloudformation', region_name=region)
stack_tool = stack_tool = StackTool(
stack_name,
region,
cf_client
)
except Exception as wtf:
logging.warning('there was a problems creating stack tool: {}'.format(wtf))
if stack_driver.poll_stack():
try:
logging.info('stack create/update was finished successfully.')
stack_tool.print_stack_info()
except Exception as wtf:
logging.warning('there was a problems printing stack info: {}'.format(wtf))
sys.exit(0)
else:
try:
logging.error('stack create/update was did not go well.')
stack_tool.print_stack_events()
except Exception as wtf:
logging.warning('there was a problems printing stack events: {}'.format(wtf))
sys.exit(1)
else:
logging.error('start of stack create/update did not go well.')
sys.exit(1)
def start_list(command_line):
"""
Facilitate the listing of a CloudFormation stacks
Args:
command_line - a dictionary to of info to inform the operation
Returns:
True if happy else False
"""
stack_driver = CloudStackUtility(command_line)
return stack_driver.list()
def start_smash(command_line):
"""
Facilitate the smashing of a CloudFormation stack
Args:
command_line - a dictionary to of info to inform the operation
Returns:
True if happy else False
"""
stack_driver = CloudStackUtility(command_line)
return stack_driver.smash()
def find_myself():
"""
Find myself
Args:
None
Returns:
An Amazon region
"""
s = boto3.session.Session()
return s.region_name
def read_config_info(ini_file):
"""
Read the INI file
Args:
ini_file - path to the file
Returns:
A dictionary of stuff from the INI file
Exits:
1 - if problems are encountered
"""
try:
config = RawConfigParser()
config.optionxform = lambda option: option
config.read(ini_file)
the_stuff = {}
for section in config.sections():
the_stuff[section] = {}
for option in config.options(section):
the_stuff[section][option] = config.get(section, option)
return the_stuff
except Exception as wtf:
logging.error('Exception caught in read_config_info(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return sys.exit(1)
|
muckamuck/stackility | stackility/command.py | start_upsert | python | def start_upsert(ini_data):
stack_driver = CloudStackUtility(ini_data)
poll_stack = not ini_data.get('no_poll', False)
if stack_driver.upsert():
logging.info('stack create/update was started successfully.')
if poll_stack:
stack_tool = None
try:
profile = ini_data.get('environment', {}).get('profile')
if profile:
boto3_session = boto3.session.Session(profile_name=profile)
else:
boto3_session = boto3.session.Session()
region = ini_data['environment']['region']
stack_name = ini_data['environment']['stack_name']
cf_client = stack_driver.get_cloud_formation_client()
if not cf_client:
cf_client = boto3_session.client('cloudformation', region_name=region)
stack_tool = stack_tool = StackTool(
stack_name,
region,
cf_client
)
except Exception as wtf:
logging.warning('there was a problems creating stack tool: {}'.format(wtf))
if stack_driver.poll_stack():
try:
logging.info('stack create/update was finished successfully.')
stack_tool.print_stack_info()
except Exception as wtf:
logging.warning('there was a problems printing stack info: {}'.format(wtf))
sys.exit(0)
else:
try:
logging.error('stack create/update was did not go well.')
stack_tool.print_stack_events()
except Exception as wtf:
logging.warning('there was a problems printing stack events: {}'.format(wtf))
sys.exit(1)
else:
logging.error('start of stack create/update did not go well.')
sys.exit(1) | Helper function to facilitate upsert.
Args:
ini_date - the dictionary of info to run upsert
Exit:
0 - good
1 - bad | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/command.py#L154-L212 | [
"def upsert(self):\n \"\"\"\n The main event of the utility. Create or update a Cloud Formation\n stack. Injecting properties where needed\n\n Args:\n None\n\n Returns:\n True if the stack create/update is started successfully else\n False if the start goes off in the weeds.\n\n ... | """
The command line interface to stackility.
Major help from: https://www.youtube.com/watch?v=kNke39OZ2k0
"""
from configparser import RawConfigParser
import time
import json
import logging
import sys
import os
import traceback
import boto3
import click
from stackility import CloudStackUtility
from stackility import StackTool
from stackility import DriftTool
@click.group()
@click.version_option(version='0.7.2')
def cli():
"""
A utility for creating, updating, listing and deleting AWS CloudFormation stacks.
"""
pass
@cli.command()
@click.option('--version', '-v', help='code version')
@click.option('--stack', '-s', help='stack name')
@click.option('--ini', '-i', help='INI file with needed information', required=True)
@click.option('--dryrun', '-d', help='dry run, generate a change set report', is_flag=True)
@click.option(
'--yaml', '-y',
help='YAML template (deprecated - YAMLness is now detected at run-time)',
is_flag=True
)
@click.option('--no-poll', help='Start the stack work but do not poll', is_flag=True)
@click.option('--work-directory', '-w', help='Start in the given working directory')
def upsert(version, stack, ini, dryrun, yaml, no_poll, work_directory):
"""
The main reason we have arrived here. This is the entry-point for the
utility to create/update a CloudFormation stack.
"""
ini_data = read_config_info(ini)
if 'environment' not in ini_data:
print('[environment] section is required in the INI file')
sys.exit(1)
if version:
ini_data['codeVersion'] = version
else:
ini_data['codeVersion'] = str(int(time.time()))
if 'region' not in ini_data['environment']:
ini_data['environment']['region'] = find_myself()
ini_data['yaml'] = bool(yaml)
ini_data['no_poll'] = bool(no_poll)
ini_data['dryrun'] = bool(dryrun)
if stack:
ini_data['environment']['stack_name'] = stack
if work_directory:
try:
os.chdir(work_directory)
except Exception as wtf:
logging.error(wtf)
sys.exit(2)
print(json.dumps(ini_data, indent=2))
start_upsert(ini_data)
@cli.command()
@click.option('-s', '--stack', required=True)
@click.option('-r', '--region')
@click.option('-f', '--profile')
def delete(stack, region, profile):
"""
Delete the given CloudFormation stack.
"""
ini_data = {}
environment = {}
environment['stack_name'] = stack
if region:
environment['region'] = region
else:
environment['region'] = find_myself()
if profile:
environment['profile'] = profile
ini_data['environment'] = environment
if start_smash(ini_data):
sys.exit(0)
else:
sys.exit(1)
@cli.command()
@click.option('-r', '--region')
@click.option('-f', '--profile')
def list(region, profile):
"""
List all the CloudFormation stacks in the given region.
"""
ini_data = {}
environment = {}
if region:
environment['region'] = region
else:
environment['region'] = find_myself()
if profile:
environment['profile'] = profile
ini_data['environment'] = environment
if start_list(ini_data):
sys.exit(0)
else:
sys.exit(1)
@cli.command()
@click.option('--stack', '-s', help='stack name', required=True)
@click.option('-r', '--region', help='region where the stack lives')
@click.option('-f', '--profile', help='AWS profile to access resources')
def drift(stack, region, profile):
"""
Produce a CloudFormation drift report for the given stack.
"""
logging.debug('finding drift - stack: {}'.format(stack))
logging.debug('region: {}'.format(region))
logging.debug('profile: {}'.format(profile))
tool = DriftTool(
Stack=stack,
Region=region,
Profile=profile,
Verbose=True
)
if tool.determine_drift():
sys.exit(0)
else:
sys.exit(1)
def start_list(command_line):
"""
Facilitate the listing of a CloudFormation stacks
Args:
command_line - a dictionary to of info to inform the operation
Returns:
True if happy else False
"""
stack_driver = CloudStackUtility(command_line)
return stack_driver.list()
def start_smash(command_line):
"""
Facilitate the smashing of a CloudFormation stack
Args:
command_line - a dictionary to of info to inform the operation
Returns:
True if happy else False
"""
stack_driver = CloudStackUtility(command_line)
return stack_driver.smash()
def find_myself():
"""
Find myself
Args:
None
Returns:
An Amazon region
"""
s = boto3.session.Session()
return s.region_name
def read_config_info(ini_file):
"""
Read the INI file
Args:
ini_file - path to the file
Returns:
A dictionary of stuff from the INI file
Exits:
1 - if problems are encountered
"""
try:
config = RawConfigParser()
config.optionxform = lambda option: option
config.read(ini_file)
the_stuff = {}
for section in config.sections():
the_stuff[section] = {}
for option in config.options(section):
the_stuff[section][option] = config.get(section, option)
return the_stuff
except Exception as wtf:
logging.error('Exception caught in read_config_info(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return sys.exit(1)
|
muckamuck/stackility | stackility/command.py | read_config_info | python | def read_config_info(ini_file):
try:
config = RawConfigParser()
config.optionxform = lambda option: option
config.read(ini_file)
the_stuff = {}
for section in config.sections():
the_stuff[section] = {}
for option in config.options(section):
the_stuff[section][option] = config.get(section, option)
return the_stuff
except Exception as wtf:
logging.error('Exception caught in read_config_info(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return sys.exit(1) | Read the INI file
Args:
ini_file - path to the file
Returns:
A dictionary of stuff from the INI file
Exits:
1 - if problems are encountered | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/command.py#L257-L284 | null | """
The command line interface to stackility.
Major help from: https://www.youtube.com/watch?v=kNke39OZ2k0
"""
from configparser import RawConfigParser
import time
import json
import logging
import sys
import os
import traceback
import boto3
import click
from stackility import CloudStackUtility
from stackility import StackTool
from stackility import DriftTool
@click.group()
@click.version_option(version='0.7.2')
def cli():
"""
A utility for creating, updating, listing and deleting AWS CloudFormation stacks.
"""
pass
@cli.command()
@click.option('--version', '-v', help='code version')
@click.option('--stack', '-s', help='stack name')
@click.option('--ini', '-i', help='INI file with needed information', required=True)
@click.option('--dryrun', '-d', help='dry run, generate a change set report', is_flag=True)
@click.option(
'--yaml', '-y',
help='YAML template (deprecated - YAMLness is now detected at run-time)',
is_flag=True
)
@click.option('--no-poll', help='Start the stack work but do not poll', is_flag=True)
@click.option('--work-directory', '-w', help='Start in the given working directory')
def upsert(version, stack, ini, dryrun, yaml, no_poll, work_directory):
"""
The main reason we have arrived here. This is the entry-point for the
utility to create/update a CloudFormation stack.
"""
ini_data = read_config_info(ini)
if 'environment' not in ini_data:
print('[environment] section is required in the INI file')
sys.exit(1)
if version:
ini_data['codeVersion'] = version
else:
ini_data['codeVersion'] = str(int(time.time()))
if 'region' not in ini_data['environment']:
ini_data['environment']['region'] = find_myself()
ini_data['yaml'] = bool(yaml)
ini_data['no_poll'] = bool(no_poll)
ini_data['dryrun'] = bool(dryrun)
if stack:
ini_data['environment']['stack_name'] = stack
if work_directory:
try:
os.chdir(work_directory)
except Exception as wtf:
logging.error(wtf)
sys.exit(2)
print(json.dumps(ini_data, indent=2))
start_upsert(ini_data)
@cli.command()
@click.option('-s', '--stack', required=True)
@click.option('-r', '--region')
@click.option('-f', '--profile')
def delete(stack, region, profile):
"""
Delete the given CloudFormation stack.
"""
ini_data = {}
environment = {}
environment['stack_name'] = stack
if region:
environment['region'] = region
else:
environment['region'] = find_myself()
if profile:
environment['profile'] = profile
ini_data['environment'] = environment
if start_smash(ini_data):
sys.exit(0)
else:
sys.exit(1)
@cli.command()
@click.option('-r', '--region')
@click.option('-f', '--profile')
def list(region, profile):
"""
List all the CloudFormation stacks in the given region.
"""
ini_data = {}
environment = {}
if region:
environment['region'] = region
else:
environment['region'] = find_myself()
if profile:
environment['profile'] = profile
ini_data['environment'] = environment
if start_list(ini_data):
sys.exit(0)
else:
sys.exit(1)
@cli.command()
@click.option('--stack', '-s', help='stack name', required=True)
@click.option('-r', '--region', help='region where the stack lives')
@click.option('-f', '--profile', help='AWS profile to access resources')
def drift(stack, region, profile):
"""
Produce a CloudFormation drift report for the given stack.
"""
logging.debug('finding drift - stack: {}'.format(stack))
logging.debug('region: {}'.format(region))
logging.debug('profile: {}'.format(profile))
tool = DriftTool(
Stack=stack,
Region=region,
Profile=profile,
Verbose=True
)
if tool.determine_drift():
sys.exit(0)
else:
sys.exit(1)
def start_upsert(ini_data):
"""
Helper function to facilitate upsert.
Args:
ini_date - the dictionary of info to run upsert
Exit:
0 - good
1 - bad
"""
stack_driver = CloudStackUtility(ini_data)
poll_stack = not ini_data.get('no_poll', False)
if stack_driver.upsert():
logging.info('stack create/update was started successfully.')
if poll_stack:
stack_tool = None
try:
profile = ini_data.get('environment', {}).get('profile')
if profile:
boto3_session = boto3.session.Session(profile_name=profile)
else:
boto3_session = boto3.session.Session()
region = ini_data['environment']['region']
stack_name = ini_data['environment']['stack_name']
cf_client = stack_driver.get_cloud_formation_client()
if not cf_client:
cf_client = boto3_session.client('cloudformation', region_name=region)
stack_tool = stack_tool = StackTool(
stack_name,
region,
cf_client
)
except Exception as wtf:
logging.warning('there was a problems creating stack tool: {}'.format(wtf))
if stack_driver.poll_stack():
try:
logging.info('stack create/update was finished successfully.')
stack_tool.print_stack_info()
except Exception as wtf:
logging.warning('there was a problems printing stack info: {}'.format(wtf))
sys.exit(0)
else:
try:
logging.error('stack create/update was did not go well.')
stack_tool.print_stack_events()
except Exception as wtf:
logging.warning('there was a problems printing stack events: {}'.format(wtf))
sys.exit(1)
else:
logging.error('start of stack create/update did not go well.')
sys.exit(1)
def start_list(command_line):
"""
Facilitate the listing of a CloudFormation stacks
Args:
command_line - a dictionary to of info to inform the operation
Returns:
True if happy else False
"""
stack_driver = CloudStackUtility(command_line)
return stack_driver.list()
def start_smash(command_line):
"""
Facilitate the smashing of a CloudFormation stack
Args:
command_line - a dictionary to of info to inform the operation
Returns:
True if happy else False
"""
stack_driver = CloudStackUtility(command_line)
return stack_driver.smash()
def find_myself():
"""
Find myself
Args:
None
Returns:
An Amazon region
"""
s = boto3.session.Session()
return s.region_name
|
muckamuck/stackility | stackility/stack_tool.py | StackTool.print_stack_info | python | def print_stack_info(self):
'''
List resources from the given stack
Args:
None
Returns:
A dictionary filled resources or None if things went sideways
'''
try:
rest_api_id = None
deployment_found = False
response = self._cf_client.describe_stack_resources(
StackName=self._stack_name
)
print('\nThe following resources were created:')
rows = []
for resource in response['StackResources']:
if resource['ResourceType'] == 'AWS::ApiGateway::RestApi':
rest_api_id = resource['PhysicalResourceId']
elif resource['ResourceType'] == 'AWS::ApiGateway::Deployment':
deployment_found = True
row = []
row.append(resource['ResourceType'])
row.append(resource['LogicalResourceId'])
row.append(resource['PhysicalResourceId'])
rows.append(row)
'''
print('\t{}\t{}\t{}'.format(
resource['ResourceType'],
resource['LogicalResourceId'],
resource['PhysicalResourceId']
)
)
'''
print(tabulate(rows, headers=['Resource Type', 'Logical ID', 'Physical ID']))
if rest_api_id and deployment_found:
url = 'https://{}.execute-api.{}.amazonaws.com/{}'.format(
rest_api_id,
self._region,
'<stage>'
)
print('\nThe deployed service can be found at this URL:')
print('\t{}\n'.format(url))
return response
except Exception as wtf:
print(wtf)
return None | List resources from the given stack
Args:
None
Returns:
A dictionary filled resources or None if things went sideways | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/stack_tool.py#L39-L92 | null | class StackTool:
_cf_client = None
_stack_name = None
_region = None
def __init__(self, stack_name, region, cf_client):
"""
StackTool is a simple tool to print some specific data about a
CloudFormation stack.
Args:
stack_name - name of the stack of interest
region - AWS region where the stack was created
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
"""
try:
self._stack_name = stack_name
self._region = region
self._cf_client = cf_client
except Exception:
raise SystemError
def print_stack_events(self):
    """
    List events from the given stack, limited to the most recent upsert
    (events sharing the newest ClientRequestToken).

    Returns:
        True if events were printed, else False
    """
    # sentinel distinguishes the first describe call (no NextToken) from pagination
    sentinel = '7be7981bd6287dd8112305e8f3822a6f'
    next_token = sentinel
    current_request_token = None
    keep_going = True
    rows = []

    try:
        while keep_going and next_token:
            if next_token == sentinel:
                response = self._cf_client.describe_stack_events(
                    StackName=self._stack_name
                )
            else:
                response = self._cf_client.describe_stack_events(
                    StackName=self._stack_name,
                    NextToken=next_token
                )
            next_token = response.get('NextToken', None)

            for event in response['StackEvents']:
                event_time = event.get('Timestamp')
                request_token = event.get('ClientRequestToken', 'unknown')

                if current_request_token is None:
                    # first event seen defines "the current upsert"
                    current_request_token = request_token
                elif current_request_token != request_token:
                    # a different token means older events - stop paging
                    keep_going = False
                    break

                rows.append([
                    event_time.strftime('%x %X'),
                    event.get('LogicalResourceId'),
                    event.get('ResourceStatus'),
                    event.get('ResourceStatusReason', '')
                ])

        if rows:
            print('\nEvents for the current upsert:')
            print(tabulate(rows, headers=['Time', 'Logical ID', 'Status', 'Message']))
            return True
        else:
            print('\nNo stack events found\n')
    except Exception as wtf:
        print(wtf)

    return False
|
muckamuck/stackility | stackility/stack_tool.py | StackTool.print_stack_events | python | def print_stack_events(self):
'''
List events from the given stack
Args:
None
Returns:
None
'''
first_token = '7be7981bd6287dd8112305e8f3822a6f'
keep_going = True
next_token = first_token
current_request_token = None
rows = []
try:
while keep_going and next_token:
if next_token == first_token:
response = self._cf_client.describe_stack_events(
StackName=self._stack_name
)
else:
response = self._cf_client.describe_stack_events(
StackName=self._stack_name,
NextToken=next_token
)
next_token = response.get('NextToken', None)
for event in response['StackEvents']:
row = []
event_time = event.get('Timestamp')
request_token = event.get('ClientRequestToken', 'unknown')
if current_request_token is None:
current_request_token = request_token
elif current_request_token != request_token:
keep_going = False
break
row.append(event_time.strftime('%x %X'))
row.append(event.get('LogicalResourceId'))
row.append(event.get('ResourceStatus'))
row.append(event.get('ResourceStatusReason', ''))
rows.append(row)
if len(rows) > 0:
print('\nEvents for the current upsert:')
print(tabulate(rows, headers=['Time', 'Logical ID', 'Status', 'Message']))
return True
else:
print('\nNo stack events found\n')
except Exception as wtf:
print(wtf)
return False | List events from the given stack
Args:
None
Returns:
None | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/stack_tool.py#L94-L147 | null | class StackTool:
_cf_client = None
_stack_name = None
_region = None
def __init__(self, stack_name, region, cf_client):
"""
StackTool is a simple tool to print some specific data about a
CloudFormation stack.
Args:
stack_name - name of the stack of interest
region - AWS region where the stack was created
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
"""
try:
self._stack_name = stack_name
self._region = region
self._cf_client = cf_client
except Exception:
raise SystemError
def print_stack_info(self):
    """
    List resources from the given stack.

    Returns:
        A dictionary of stack resources, or None if things went sideways
    """
    try:
        response = self._cf_client.describe_stack_resources(
            StackName=self._stack_name
        )

        print('\nThe following resources were created:')
        rest_api_id = None
        deployment_found = False
        rows = []
        for resource in response['StackResources']:
            resource_type = resource['ResourceType']
            # remember API Gateway bits so we can print the service URL below
            if resource_type == 'AWS::ApiGateway::RestApi':
                rest_api_id = resource['PhysicalResourceId']
            elif resource_type == 'AWS::ApiGateway::Deployment':
                deployment_found = True

            rows.append([
                resource_type,
                resource['LogicalResourceId'],
                resource['PhysicalResourceId']
            ])

        print(tabulate(rows, headers=['Resource Type', 'Logical ID', 'Physical ID']))

        if rest_api_id and deployment_found:
            url = 'https://{}.execute-api.{}.amazonaws.com/{}'.format(
                rest_api_id,
                self._region,
                '<stage>'
            )
            print('\nThe deployed service can be found at this URL:')
            print('\t{}\n'.format(url))

        return response
    except Exception as wtf:
        print(wtf)
        return None
|
def get_ssm_parameter(parameter_name):
    """
    Get the decrypted value of an SSM parameter.

    Args:
        parameter_name - the name of the stored parameter of interest

    Returns:
        The decrypted value if allowed and present, else an empty string
        (the original docstring claimed None, but '' was always returned)
    """
    try:
        response = boto3.client('ssm').get_parameters(
            Names=[parameter_name],
            WithDecryption=True
        )
        return response['Parameters'][0].get('Value', '')
    except Exception:
        # best effort: missing or forbidden parameters yield an empty string
        pass

    return ''
Args:
parameter_name - the name of the stored parameter of interest
Return:
Value if allowed and present else None | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/utility/get_ssm_parameter.py#L6-L26 | null | from __future__ import print_function
import boto3
import sys
def main():
value = get_ssm_parameter(sys.argv[1])
print(value, end='')
if __name__ == '__main__':
main()
|
muckamuck/stackility | stackility/CloudStackUtility.py | CloudStackUtility.upsert | python | def upsert(self):
required_parameters = []
self._stackParameters = []
try:
self._initialize_upsert()
except Exception:
return False
try:
available_parameters = self._parameters.keys()
for parameter_name in self._template.get('Parameters', {}):
required_parameters.append(str(parameter_name))
logging.info(' required parameters: ' + str(required_parameters))
logging.info('available parameters: ' + str(available_parameters))
parameters = []
for required_parameter in required_parameters:
parameter = {}
parameter['ParameterKey'] = str(required_parameter)
required_parameter = str(required_parameter)
if required_parameter in self._parameters:
parameter['ParameterValue'] = self._parameters[required_parameter]
else:
parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
parameters.append(parameter)
if not self._analyze_stuff():
sys.exit(1)
if self._config.get('dryrun', False):
logging.info('Generating change set')
set_id = self._generate_change_set(parameters)
if set_id:
self._describe_change_set(set_id)
logging.info('This was a dryrun')
sys.exit(0)
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
if self._updateStack:
stack = self._cloudFormation.update_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
else:
stack = self._cloudFormation.create_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
except Exception as x:
if self._verbose:
logging.error(x, exc_info=True)
else:
logging.error(x, exc_info=False)
return False
return True | The main event of the utility. Create or update a Cloud Formation
stack. Injecting properties where needed
Args:
None
Returns:
True if the stack create/update is started successfully else
False if the start goes off in the weeds.
Exits:
If the user asked for a dryrun exit(with a code 0) the thing here. There is no
point continuing after that point. | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L89-L179 | [
"def _initialize_upsert(self):\n if not self._validate_ini_data():\n logging.error('INI file missing required bits; bucket and/or template and/or stack_name')\n raise SystemError\n elif not self._render_template():\n logging.error('template rendering failed')\n raise SystemError\n ... | class CloudStackUtility:
"""
Cloud stack utility is yet another tool create AWS Cloudformation stacks.
"""
ASK = '[ask]'
SSM = '[ssm:'
_verbose = False
_template = None
_b3Sess = None
_cloudFormation = None
_config = None
_parameters = {}
_stackParameters = []
_s3 = None
_ssm = None
_tags = []
_templateUrl = None
_updateStack = False
_yaml = False
def __init__(self, config_block):
"""
Cloud stack utility init method.
Args:
config_block - a dictionary creates from the CLI driver. See that
script for the things that are required and
optional.
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
"""
if config_block:
self._config = config_block
else:
logging.error('config block was garbage')
raise SystemError
def _describe_change_set(self, set_id):
complete_states = ['CREATE_COMPLETE', 'FAILED', 'UNKNOWN']
try:
logging.info('polling change set, POLL_INTERVAL={}'.format(POLL_INTERVAL))
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
while status not in complete_states:
logging.info('current set status: {}'.format(status))
time.sleep(POLL_INTERVAL)
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
logging.info('current set status: {}'.format(status))
print('\n')
print('Change set report:')
for change in response.get('Changes', []):
print(
json.dumps(
change,
indent=2,
default=json_util.default
)
)
print('\n')
logging.info('cleaning up change set')
self._cloudFormation.delete_change_set(ChangeSetName=set_id)
return True
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return False
def _generate_change_set(self, parameters):
try:
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
set_name = 'chg{}'.format(int(time.time()))
if self._updateStack:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='UPDATE'
)
else:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='CREATE'
)
if self._verbose:
logging.info('Change set: {}'.format(
json.dumps(changes, indent=2, default=json_util.default)
))
return changes.get('Id', None)
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return None
def _render_template(self):
buf = None
try:
context = self._config.get('meta-parameters', None)
if not context:
return True
template_file = self._config.get('environment', {}).get('template', None)
path, filename = os.path.split(template_file)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(path or './')
)
buf = env.get_template(filename).render(context)
with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr', delete=False) as tmp:
tmp.write(buf)
logging.info('template rendered into {}'.format(tmp.name))
self._config['environment']['template'] = tmp.name
except Exception as wtf:
print('error: _render_template() caught {}'.format(wtf))
sys.exit(1)
return buf
def _load_template(self):
template_decoded = False
template_file = self._config.get('environment', {}).get('template', None)
self._template = None
try:
json_stuff = open(template_file)
self._template = json.load(json_stuff)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = False
logging.info('template is JSON')
else:
logging.info('template is not a valid JSON template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(json): {}'.format(x))
logging.info('template is not JSON')
if not template_decoded:
try:
yaml.add_multi_constructor('', default_ctor, Loader=Loader)
with open(template_file, 'r') as f:
self._template = yaml.load(f, Loader=Loader)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = True
logging.info('template is YAML')
else:
logging.info('template is not a valid YAML template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(yaml): {}'.format(x))
logging.info('template is not YAML')
return template_decoded
def list(self):
"""
List the existing stacks in the indicated region
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if 'StackSummaries' in response:
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if stack_status != 'DELETE_COMPLETE':
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token)
else:
interested = False
return True
def smash(self):
"""
Smash the given stack
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_smash()
try:
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
logging.debug('smash pre-flight returned: {}'.format(
json.dumps(response,
indent=4,
default=json_util.default
)))
except ClientError as wtf:
logging.warning('your stack is in another castle [0].')
return False
except Exception as wtf:
logging.error('failed to find intial status of smash candidate: {}'.format(wtf))
return False
response = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))
return self.poll_stack()
def _init_boto3_clients(self):
"""
The utililty requires boto3 clients to Cloud Formation and S3. Here is
where we make them.
Args:
None
Returns:
Good or Bad; True or False
"""
try:
profile = self._config.get('environment', {}).get('profile')
region = self._config.get('environment', {}).get('region')
if profile:
self._b3Sess = boto3.session.Session(profile_name=profile)
else:
self._b3Sess = boto3.session.Session()
self._s3 = self._b3Sess.client('s3')
self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
self._ssm = self._b3Sess.client('ssm', region_name=region)
return True
except Exception as wtf:
logging.error('Exception caught in intialize_session(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _fill_defaults(self):
try:
parms = self._template['Parameters']
for key in parms:
key = str(key)
if 'Default' in parms[key] and key not in self._parameters:
self._parameters[key] = str(parms[key]['Default'])
except Exception as wtf:
logging.error('Exception caught in fill_defaults(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
return True
def _get_ssm_parameter(self, p):
"""
Get parameters from Simple Systems Manager
Args:
p - a parameter name
Returns:
a value, decrypted if needed, if successful or None if things go
sideways.
"""
try:
response = self._ssm.get_parameter(Name=p, WithDecryption=True)
return response.get('Parameter', {}).get('Value', None)
except Exception as ruh_roh:
logging.error(ruh_roh, exc_info=False)
return None
def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True
def _read_tags(self):
"""
Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
Figure what could go wrong and at least acknowledge the
the fact that Murphy was an optimist.
"""
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True
def _set_update(self):
"""
Determine if we are creating a new stack or updating and existing one.
The update member is set as you would expect at the end of this query.
Args:
None
Returns:
True
"""
try:
self._updateStack = False
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
if stack['StackStatus'] == 'ROLLBACK_COMPLETE':
logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')
del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))
stack_delete = self.poll_stack()
if not stack_delete:
return False
if stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']:
self._updateStack = True
except:
self._updateStack = False
logging.info('update_stack: ' + str(self._updateStack))
return True
def _archive_elements(self):
"""
Cloud Formation likes to take the template from S3 so here we put the
template into S3. We also store the parameters file that was used in
this run. Note: you can pass anything as the version string but you
should at least consider a version control tag or git commit hash as
the version.
Args:
None
Returns:
True if the stuff lands in S3 or False if the file doesn't
really exist or the upload goes sideways.
"""
try:
stackfile_key, propertyfile_key = self._craft_s3_keys()
template_file = self._config.get('environment', {}).get('template', None)
bucket = self._config.get('environment', {}).get('bucket', None)
if not os.path.isfile(template_file):
logging.info("{} is not actually a file".format(template_file))
return False
logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key))
temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
with open(temp_file_name, 'w') as dump_file:
json.dump(self._parameters, dump_file, indent=4)
self._s3.upload_file(temp_file_name, bucket, propertyfile_key)
logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key))
self._s3.upload_file(template_file, bucket, stackfile_key)
self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key)
logging.info("template_url: " + self._templateUrl)
return True
except Exception as x:
logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))
traceback.print_exc(file=sys.stdout)
return False
def _craft_s3_keys(self):
"""
We are putting stuff into S3, were supplied the bucket. Here we
craft the key of the elements we are putting up there in the
internet clouds.
Args:
None
Returns:
a tuple of teplate file key and property file key
"""
now = time.gmtime()
stub = "templates/{stack_name}/{version}".format(
stack_name=self._config.get('environment', {}).get('stack_name', None),
version=self._config.get('codeVersion')
)
stub = stub + "/" + str(now.tm_year)
stub = stub + "/" + str('%02d' % now.tm_mon)
stub = stub + "/" + str('%02d' % now.tm_mday)
stub = stub + "/" + str('%02d' % now.tm_hour)
stub = stub + ":" + str('%02d' % now.tm_min)
stub = stub + ":" + str('%02d' % now.tm_sec)
if self._yaml:
template_key = stub + "/stack.yaml"
else:
template_key = stub + "/stack.json"
property_key = stub + "/stack.properties"
return template_key, property_key
def poll_stack(self):
"""
Spin in a loop while the Cloud Formation process either fails or succeeds
Args:
None
Returns:
Good or bad; True or False
"""
logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
time.sleep(POLL_INTERVAL)
completed_states = [
'CREATE_COMPLETE',
'UPDATE_COMPLETE',
'DELETE_COMPLETE'
]
stack_name = self._config.get('environment', {}).get('stack_name', None)
while True:
try:
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
current_status = stack['StackStatus']
logging.info('current status of {}: {}'.format(stack_name, current_status))
if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
if current_status in completed_states:
return True
else:
return False
time.sleep(POLL_INTERVAL)
except ClientError as wtf:
if str(wtf).find('does not exist') == -1:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
else:
logging.info('{} is gone'.format(stack_name))
return True
except Exception as wtf:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _initialize_list(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _initialize_smash(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _validate_ini_data(self):
if 'stack_name' not in self._config.get('environment', {}):
return False
elif 'bucket' not in self._config.get('environment', {}):
return False
elif 'template' not in self._config.get('environment', {}):
return False
else:
template_file = self._config.get('environment', {}).get('template', None)
if os.path.isfile(template_file):
return True
else:
logging.error('template file \'{}\' does not exist, I give up!'.format(template_file))
return False
def _initialize_upsert(self):
if not self._validate_ini_data():
logging.error('INI file missing required bits; bucket and/or template and/or stack_name')
raise SystemError
elif not self._render_template():
logging.error('template rendering failed')
raise SystemError
elif not self._load_template():
logging.error('template initialization was not good')
raise SystemError
elif not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
elif not self._fill_parameters():
logging.error('parameter setup was not good')
raise SystemError
elif not self._read_tags():
logging.error('tags initialization was not good')
raise SystemError
elif not self._archive_elements():
logging.error('saving stuff to S3 did not go well')
raise SystemError
elif not self._set_update():
logging.error('there was a problem determining update or create')
raise SystemError
def _analyze_stuff(self):
template_scanner = self._config.get('analysis', {}).get('template', None)
tags_scanner = self._config.get('analysis', {}).get('tags', None)
if template_scanner or tags_scanner:
r = self._externally_analyze_stuff(template_scanner, tags_scanner)
if not r:
return False
wrk = self._config.get('analysis', {}).get('enforced', 'crap').lower()
rule_exceptions = self._config.get('analysis', {}).get('exceptions', None)
if wrk == 'true' or wrk == 'false':
enforced = wrk == 'true'
self._internally_analyze_stuff(enforced, rule_exceptions)
return True
def _externally_analyze_stuff(self, template_scanner, tags_scanner):
scans_executed = False
tags_scan_status = 0
template_scan_status = 0
the_data = None
try:
if template_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
template_scan_status = answer.get('exit_status', -2)
print('\nTemplate scan:')
print(json.dumps(answer, indent=2))
if tags_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
tags_scan_status = answer.get('exit_status', -2)
print('\nTag scan:')
print(json.dumps(answer, indent=2))
if not scans_executed:
return True
elif template_scan_status == 0 and tags_scan_status == 0:
print('All scans successful')
return True
else:
print('Failed scans')
return False
except Exception as wtf:
print('')
logging.info('template_scanner: {}'.format(template_scanner))
logging.info(' tags_scanner: {}'.format(tags_scanner))
print('')
logging.error('Exception caught in analyze_stuff(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _internally_analyze_stuff(self, enforced, rule_exceptions):
try:
config_dict = {}
config_dict['template_file'] = self._config['environment']['template']
validator = ValidateUtility(config_dict)
_results = validator.validate()
results = json.loads(_results)
for result in results:
try:
error_count = int(result.get('failure_count', 0))
except Exception as strangeness:
logging.warn('internally_analyze_stuff() strangeness: {}'.format(strangeness))
error_count = -1
if enforced:
traceback.print_exc(file=sys.stdout)
sys.exit(1)
if error_count == 0:
logging.info('CloudFormation Validator found zero errors')
elif error_count == 1:
if enforced:
logging.error('CloudFormation Validator found one error')
sys.exit(1)
else:
logging.warn('CloudFormation Validator found one error')
elif error_count > 1:
if enforced:
logging.error(
'CloudFormation Validator found {} errors'.format(error_count)
)
sys.exit(1)
else:
logging.warn(
'CloudFormation Validator found {} errors'.format(error_count)
)
except Exception as ruh_roh_shaggy:
logging.error('internally_analyze_stuff() exploded: {}'.format(ruh_roh_shaggy))
traceback.print_exc(file=sys.stdout)
if enforced:
sys.exit(1)
return True
def get_cloud_formation_client(self):
return self._cloudFormation
|
muckamuck/stackility | stackility/CloudStackUtility.py | CloudStackUtility.list | python | def list(self):
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if 'StackSummaries' in response:
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if stack_status != 'DELETE_COMPLETE':
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token)
else:
interested = False
return True | List the existing stacks in the indicated region
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems. | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L321-L353 | [
"def _initialize_list(self):\n if not self._init_boto3_clients():\n logging.error('session initialization was not good')\n raise SystemError\n"
] | class CloudStackUtility:
"""
Cloud stack utility is yet another tool create AWS Cloudformation stacks.
"""
ASK = '[ask]'
SSM = '[ssm:'
_verbose = False
_template = None
_b3Sess = None
_cloudFormation = None
_config = None
_parameters = {}
_stackParameters = []
_s3 = None
_ssm = None
_tags = []
_templateUrl = None
_updateStack = False
_yaml = False
def __init__(self, config_block):
"""
Cloud stack utility init method.
Args:
config_block - a dictionary creates from the CLI driver. See that
script for the things that are required and
optional.
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
"""
if config_block:
self._config = config_block
else:
logging.error('config block was garbage')
raise SystemError
def upsert(self):
"""
The main event of the utility. Create or update a Cloud Formation
stack. Injecting properties where needed
Args:
None
Returns:
True if the stack create/update is started successfully else
False if the start goes off in the weeds.
Exits:
If the user asked for a dryrun exit(with a code 0) the thing here. There is no
point continuing after that point.
"""
required_parameters = []
self._stackParameters = []
try:
self._initialize_upsert()
except Exception:
return False
try:
available_parameters = self._parameters.keys()
for parameter_name in self._template.get('Parameters', {}):
required_parameters.append(str(parameter_name))
logging.info(' required parameters: ' + str(required_parameters))
logging.info('available parameters: ' + str(available_parameters))
parameters = []
for required_parameter in required_parameters:
parameter = {}
parameter['ParameterKey'] = str(required_parameter)
required_parameter = str(required_parameter)
if required_parameter in self._parameters:
parameter['ParameterValue'] = self._parameters[required_parameter]
else:
parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
parameters.append(parameter)
if not self._analyze_stuff():
sys.exit(1)
if self._config.get('dryrun', False):
logging.info('Generating change set')
set_id = self._generate_change_set(parameters)
if set_id:
self._describe_change_set(set_id)
logging.info('This was a dryrun')
sys.exit(0)
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
if self._updateStack:
stack = self._cloudFormation.update_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
else:
stack = self._cloudFormation.create_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
except Exception as x:
if self._verbose:
logging.error(x, exc_info=True)
else:
logging.error(x, exc_info=False)
return False
return True
def _describe_change_set(self, set_id):
complete_states = ['CREATE_COMPLETE', 'FAILED', 'UNKNOWN']
try:
logging.info('polling change set, POLL_INTERVAL={}'.format(POLL_INTERVAL))
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
while status not in complete_states:
logging.info('current set status: {}'.format(status))
time.sleep(POLL_INTERVAL)
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
logging.info('current set status: {}'.format(status))
print('\n')
print('Change set report:')
for change in response.get('Changes', []):
print(
json.dumps(
change,
indent=2,
default=json_util.default
)
)
print('\n')
logging.info('cleaning up change set')
self._cloudFormation.delete_change_set(ChangeSetName=set_id)
return True
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return False
def _generate_change_set(self, parameters):
try:
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
set_name = 'chg{}'.format(int(time.time()))
if self._updateStack:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='UPDATE'
)
else:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='CREATE'
)
if self._verbose:
logging.info('Change set: {}'.format(
json.dumps(changes, indent=2, default=json_util.default)
))
return changes.get('Id', None)
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return None
def _render_template(self):
buf = None
try:
context = self._config.get('meta-parameters', None)
if not context:
return True
template_file = self._config.get('environment', {}).get('template', None)
path, filename = os.path.split(template_file)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(path or './')
)
buf = env.get_template(filename).render(context)
with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr', delete=False) as tmp:
tmp.write(buf)
logging.info('template rendered into {}'.format(tmp.name))
self._config['environment']['template'] = tmp.name
except Exception as wtf:
print('error: _render_template() caught {}'.format(wtf))
sys.exit(1)
return buf
def _load_template(self):
template_decoded = False
template_file = self._config.get('environment', {}).get('template', None)
self._template = None
try:
json_stuff = open(template_file)
self._template = json.load(json_stuff)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = False
logging.info('template is JSON')
else:
logging.info('template is not a valid JSON template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(json): {}'.format(x))
logging.info('template is not JSON')
if not template_decoded:
try:
yaml.add_multi_constructor('', default_ctor, Loader=Loader)
with open(template_file, 'r') as f:
self._template = yaml.load(f, Loader=Loader)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = True
logging.info('template is YAML')
else:
logging.info('template is not a valid YAML template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(yaml): {}'.format(x))
logging.info('template is not YAML')
return template_decoded
def smash(self):
"""
Smash the given stack
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_smash()
try:
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
logging.debug('smash pre-flight returned: {}'.format(
json.dumps(response,
indent=4,
default=json_util.default
)))
except ClientError as wtf:
logging.warning('your stack is in another castle [0].')
return False
except Exception as wtf:
logging.error('failed to find intial status of smash candidate: {}'.format(wtf))
return False
response = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))
return self.poll_stack()
def _init_boto3_clients(self):
"""
The utililty requires boto3 clients to Cloud Formation and S3. Here is
where we make them.
Args:
None
Returns:
Good or Bad; True or False
"""
try:
profile = self._config.get('environment', {}).get('profile')
region = self._config.get('environment', {}).get('region')
if profile:
self._b3Sess = boto3.session.Session(profile_name=profile)
else:
self._b3Sess = boto3.session.Session()
self._s3 = self._b3Sess.client('s3')
self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
self._ssm = self._b3Sess.client('ssm', region_name=region)
return True
except Exception as wtf:
logging.error('Exception caught in intialize_session(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _fill_defaults(self):
try:
parms = self._template['Parameters']
for key in parms:
key = str(key)
if 'Default' in parms[key] and key not in self._parameters:
self._parameters[key] = str(parms[key]['Default'])
except Exception as wtf:
logging.error('Exception caught in fill_defaults(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
return True
def _get_ssm_parameter(self, p):
"""
Get parameters from Simple Systems Manager
Args:
p - a parameter name
Returns:
a value, decrypted if needed, if successful or None if things go
sideways.
"""
try:
response = self._ssm.get_parameter(Name=p, WithDecryption=True)
return response.get('Parameter', {}).get('Value', None)
except Exception as ruh_roh:
logging.error(ruh_roh, exc_info=False)
return None
def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True
def _read_tags(self):
"""
Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
Figure what could go wrong and at least acknowledge the
the fact that Murphy was an optimist.
"""
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True
def _set_update(self):
"""
Determine if we are creating a new stack or updating and existing one.
The update member is set as you would expect at the end of this query.
Args:
None
Returns:
True
"""
try:
self._updateStack = False
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
if stack['StackStatus'] == 'ROLLBACK_COMPLETE':
logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')
del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))
stack_delete = self.poll_stack()
if not stack_delete:
return False
if stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']:
self._updateStack = True
except:
self._updateStack = False
logging.info('update_stack: ' + str(self._updateStack))
return True
def _archive_elements(self):
"""
Cloud Formation likes to take the template from S3 so here we put the
template into S3. We also store the parameters file that was used in
this run. Note: you can pass anything as the version string but you
should at least consider a version control tag or git commit hash as
the version.
Args:
None
Returns:
True if the stuff lands in S3 or False if the file doesn't
really exist or the upload goes sideways.
"""
try:
stackfile_key, propertyfile_key = self._craft_s3_keys()
template_file = self._config.get('environment', {}).get('template', None)
bucket = self._config.get('environment', {}).get('bucket', None)
if not os.path.isfile(template_file):
logging.info("{} is not actually a file".format(template_file))
return False
logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key))
temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
with open(temp_file_name, 'w') as dump_file:
json.dump(self._parameters, dump_file, indent=4)
self._s3.upload_file(temp_file_name, bucket, propertyfile_key)
logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key))
self._s3.upload_file(template_file, bucket, stackfile_key)
self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key)
logging.info("template_url: " + self._templateUrl)
return True
except Exception as x:
logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))
traceback.print_exc(file=sys.stdout)
return False
def _craft_s3_keys(self):
"""
We are putting stuff into S3, were supplied the bucket. Here we
craft the key of the elements we are putting up there in the
internet clouds.
Args:
None
Returns:
a tuple of teplate file key and property file key
"""
now = time.gmtime()
stub = "templates/{stack_name}/{version}".format(
stack_name=self._config.get('environment', {}).get('stack_name', None),
version=self._config.get('codeVersion')
)
stub = stub + "/" + str(now.tm_year)
stub = stub + "/" + str('%02d' % now.tm_mon)
stub = stub + "/" + str('%02d' % now.tm_mday)
stub = stub + "/" + str('%02d' % now.tm_hour)
stub = stub + ":" + str('%02d' % now.tm_min)
stub = stub + ":" + str('%02d' % now.tm_sec)
if self._yaml:
template_key = stub + "/stack.yaml"
else:
template_key = stub + "/stack.json"
property_key = stub + "/stack.properties"
return template_key, property_key
def poll_stack(self):
"""
Spin in a loop while the Cloud Formation process either fails or succeeds
Args:
None
Returns:
Good or bad; True or False
"""
logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
time.sleep(POLL_INTERVAL)
completed_states = [
'CREATE_COMPLETE',
'UPDATE_COMPLETE',
'DELETE_COMPLETE'
]
stack_name = self._config.get('environment', {}).get('stack_name', None)
while True:
try:
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
current_status = stack['StackStatus']
logging.info('current status of {}: {}'.format(stack_name, current_status))
if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
if current_status in completed_states:
return True
else:
return False
time.sleep(POLL_INTERVAL)
except ClientError as wtf:
if str(wtf).find('does not exist') == -1:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
else:
logging.info('{} is gone'.format(stack_name))
return True
except Exception as wtf:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _initialize_list(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _initialize_smash(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _validate_ini_data(self):
if 'stack_name' not in self._config.get('environment', {}):
return False
elif 'bucket' not in self._config.get('environment', {}):
return False
elif 'template' not in self._config.get('environment', {}):
return False
else:
template_file = self._config.get('environment', {}).get('template', None)
if os.path.isfile(template_file):
return True
else:
logging.error('template file \'{}\' does not exist, I give up!'.format(template_file))
return False
def _initialize_upsert(self):
if not self._validate_ini_data():
logging.error('INI file missing required bits; bucket and/or template and/or stack_name')
raise SystemError
elif not self._render_template():
logging.error('template rendering failed')
raise SystemError
elif not self._load_template():
logging.error('template initialization was not good')
raise SystemError
elif not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
elif not self._fill_parameters():
logging.error('parameter setup was not good')
raise SystemError
elif not self._read_tags():
logging.error('tags initialization was not good')
raise SystemError
elif not self._archive_elements():
logging.error('saving stuff to S3 did not go well')
raise SystemError
elif not self._set_update():
logging.error('there was a problem determining update or create')
raise SystemError
def _analyze_stuff(self):
template_scanner = self._config.get('analysis', {}).get('template', None)
tags_scanner = self._config.get('analysis', {}).get('tags', None)
if template_scanner or tags_scanner:
r = self._externally_analyze_stuff(template_scanner, tags_scanner)
if not r:
return False
wrk = self._config.get('analysis', {}).get('enforced', 'crap').lower()
rule_exceptions = self._config.get('analysis', {}).get('exceptions', None)
if wrk == 'true' or wrk == 'false':
enforced = wrk == 'true'
self._internally_analyze_stuff(enforced, rule_exceptions)
return True
def _externally_analyze_stuff(self, template_scanner, tags_scanner):
scans_executed = False
tags_scan_status = 0
template_scan_status = 0
the_data = None
try:
if template_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
template_scan_status = answer.get('exit_status', -2)
print('\nTemplate scan:')
print(json.dumps(answer, indent=2))
if tags_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
tags_scan_status = answer.get('exit_status', -2)
print('\nTag scan:')
print(json.dumps(answer, indent=2))
if not scans_executed:
return True
elif template_scan_status == 0 and tags_scan_status == 0:
print('All scans successful')
return True
else:
print('Failed scans')
return False
except Exception as wtf:
print('')
logging.info('template_scanner: {}'.format(template_scanner))
logging.info(' tags_scanner: {}'.format(tags_scanner))
print('')
logging.error('Exception caught in analyze_stuff(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _internally_analyze_stuff(self, enforced, rule_exceptions):
try:
config_dict = {}
config_dict['template_file'] = self._config['environment']['template']
validator = ValidateUtility(config_dict)
_results = validator.validate()
results = json.loads(_results)
for result in results:
try:
error_count = int(result.get('failure_count', 0))
except Exception as strangeness:
logging.warn('internally_analyze_stuff() strangeness: {}'.format(strangeness))
error_count = -1
if enforced:
traceback.print_exc(file=sys.stdout)
sys.exit(1)
if error_count == 0:
logging.info('CloudFormation Validator found zero errors')
elif error_count == 1:
if enforced:
logging.error('CloudFormation Validator found one error')
sys.exit(1)
else:
logging.warn('CloudFormation Validator found one error')
elif error_count > 1:
if enforced:
logging.error(
'CloudFormation Validator found {} errors'.format(error_count)
)
sys.exit(1)
else:
logging.warn(
'CloudFormation Validator found {} errors'.format(error_count)
)
except Exception as ruh_roh_shaggy:
logging.error('internally_analyze_stuff() exploded: {}'.format(ruh_roh_shaggy))
traceback.print_exc(file=sys.stdout)
if enforced:
sys.exit(1)
return True
def get_cloud_formation_client(self):
return self._cloudFormation
|
muckamuck/stackility | stackility/CloudStackUtility.py | CloudStackUtility.smash | python | def smash(self):
self._initialize_smash()
try:
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
logging.debug('smash pre-flight returned: {}'.format(
json.dumps(response,
indent=4,
default=json_util.default
)))
except ClientError as wtf:
logging.warning('your stack is in another castle [0].')
return False
except Exception as wtf:
logging.error('failed to find intial status of smash candidate: {}'.format(wtf))
return False
response = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))
return self.poll_stack() | Smash the given stack
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems. | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L355-L388 | [
"def poll_stack(self):\n \"\"\"\n Spin in a loop while the Cloud Formation process either fails or succeeds\n\n Args:\n None\n\n Returns:\n Good or bad; True or False\n \"\"\"\n logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))\n time.sleep(POLL_INTERVAL... | class CloudStackUtility:
"""
Cloud stack utility is yet another tool create AWS Cloudformation stacks.
"""
ASK = '[ask]'
SSM = '[ssm:'
_verbose = False
_template = None
_b3Sess = None
_cloudFormation = None
_config = None
_parameters = {}
_stackParameters = []
_s3 = None
_ssm = None
_tags = []
_templateUrl = None
_updateStack = False
_yaml = False
def __init__(self, config_block):
"""
Cloud stack utility init method.
Args:
config_block - a dictionary creates from the CLI driver. See that
script for the things that are required and
optional.
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
"""
if config_block:
self._config = config_block
else:
logging.error('config block was garbage')
raise SystemError
def upsert(self):
"""
The main event of the utility. Create or update a Cloud Formation
stack. Injecting properties where needed
Args:
None
Returns:
True if the stack create/update is started successfully else
False if the start goes off in the weeds.
Exits:
If the user asked for a dryrun exit(with a code 0) the thing here. There is no
point continuing after that point.
"""
required_parameters = []
self._stackParameters = []
try:
self._initialize_upsert()
except Exception:
return False
try:
available_parameters = self._parameters.keys()
for parameter_name in self._template.get('Parameters', {}):
required_parameters.append(str(parameter_name))
logging.info(' required parameters: ' + str(required_parameters))
logging.info('available parameters: ' + str(available_parameters))
parameters = []
for required_parameter in required_parameters:
parameter = {}
parameter['ParameterKey'] = str(required_parameter)
required_parameter = str(required_parameter)
if required_parameter in self._parameters:
parameter['ParameterValue'] = self._parameters[required_parameter]
else:
parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
parameters.append(parameter)
if not self._analyze_stuff():
sys.exit(1)
if self._config.get('dryrun', False):
logging.info('Generating change set')
set_id = self._generate_change_set(parameters)
if set_id:
self._describe_change_set(set_id)
logging.info('This was a dryrun')
sys.exit(0)
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
if self._updateStack:
stack = self._cloudFormation.update_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
else:
stack = self._cloudFormation.create_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
except Exception as x:
if self._verbose:
logging.error(x, exc_info=True)
else:
logging.error(x, exc_info=False)
return False
return True
def _describe_change_set(self, set_id):
complete_states = ['CREATE_COMPLETE', 'FAILED', 'UNKNOWN']
try:
logging.info('polling change set, POLL_INTERVAL={}'.format(POLL_INTERVAL))
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
while status not in complete_states:
logging.info('current set status: {}'.format(status))
time.sleep(POLL_INTERVAL)
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
logging.info('current set status: {}'.format(status))
print('\n')
print('Change set report:')
for change in response.get('Changes', []):
print(
json.dumps(
change,
indent=2,
default=json_util.default
)
)
print('\n')
logging.info('cleaning up change set')
self._cloudFormation.delete_change_set(ChangeSetName=set_id)
return True
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return False
def _generate_change_set(self, parameters):
try:
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
set_name = 'chg{}'.format(int(time.time()))
if self._updateStack:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='UPDATE'
)
else:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='CREATE'
)
if self._verbose:
logging.info('Change set: {}'.format(
json.dumps(changes, indent=2, default=json_util.default)
))
return changes.get('Id', None)
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return None
def _render_template(self):
buf = None
try:
context = self._config.get('meta-parameters', None)
if not context:
return True
template_file = self._config.get('environment', {}).get('template', None)
path, filename = os.path.split(template_file)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(path or './')
)
buf = env.get_template(filename).render(context)
with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr', delete=False) as tmp:
tmp.write(buf)
logging.info('template rendered into {}'.format(tmp.name))
self._config['environment']['template'] = tmp.name
except Exception as wtf:
print('error: _render_template() caught {}'.format(wtf))
sys.exit(1)
return buf
def _load_template(self):
template_decoded = False
template_file = self._config.get('environment', {}).get('template', None)
self._template = None
try:
json_stuff = open(template_file)
self._template = json.load(json_stuff)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = False
logging.info('template is JSON')
else:
logging.info('template is not a valid JSON template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(json): {}'.format(x))
logging.info('template is not JSON')
if not template_decoded:
try:
yaml.add_multi_constructor('', default_ctor, Loader=Loader)
with open(template_file, 'r') as f:
self._template = yaml.load(f, Loader=Loader)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = True
logging.info('template is YAML')
else:
logging.info('template is not a valid YAML template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(yaml): {}'.format(x))
logging.info('template is not YAML')
return template_decoded
def list(self):
"""
List the existing stacks in the indicated region
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if 'StackSummaries' in response:
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if stack_status != 'DELETE_COMPLETE':
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token)
else:
interested = False
return True
def _init_boto3_clients(self):
"""
The utililty requires boto3 clients to Cloud Formation and S3. Here is
where we make them.
Args:
None
Returns:
Good or Bad; True or False
"""
try:
profile = self._config.get('environment', {}).get('profile')
region = self._config.get('environment', {}).get('region')
if profile:
self._b3Sess = boto3.session.Session(profile_name=profile)
else:
self._b3Sess = boto3.session.Session()
self._s3 = self._b3Sess.client('s3')
self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
self._ssm = self._b3Sess.client('ssm', region_name=region)
return True
except Exception as wtf:
logging.error('Exception caught in intialize_session(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _fill_defaults(self):
try:
parms = self._template['Parameters']
for key in parms:
key = str(key)
if 'Default' in parms[key] and key not in self._parameters:
self._parameters[key] = str(parms[key]['Default'])
except Exception as wtf:
logging.error('Exception caught in fill_defaults(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
return True
def _get_ssm_parameter(self, p):
"""
Get parameters from Simple Systems Manager
Args:
p - a parameter name
Returns:
a value, decrypted if needed, if successful or None if things go
sideways.
"""
try:
response = self._ssm.get_parameter(Name=p, WithDecryption=True)
return response.get('Parameter', {}).get('Value', None)
except Exception as ruh_roh:
logging.error(ruh_roh, exc_info=False)
return None
def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True
def _read_tags(self):
"""
Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
Figure what could go wrong and at least acknowledge the
the fact that Murphy was an optimist.
"""
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True
def _set_update(self):
"""
Determine if we are creating a new stack or updating and existing one.
The update member is set as you would expect at the end of this query.
Args:
None
Returns:
True
"""
try:
self._updateStack = False
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
if stack['StackStatus'] == 'ROLLBACK_COMPLETE':
logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')
del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))
stack_delete = self.poll_stack()
if not stack_delete:
return False
if stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']:
self._updateStack = True
except:
self._updateStack = False
logging.info('update_stack: ' + str(self._updateStack))
return True
def _archive_elements(self):
"""
Cloud Formation likes to take the template from S3 so here we put the
template into S3. We also store the parameters file that was used in
this run. Note: you can pass anything as the version string but you
should at least consider a version control tag or git commit hash as
the version.
Args:
None
Returns:
True if the stuff lands in S3 or False if the file doesn't
really exist or the upload goes sideways.
"""
try:
stackfile_key, propertyfile_key = self._craft_s3_keys()
template_file = self._config.get('environment', {}).get('template', None)
bucket = self._config.get('environment', {}).get('bucket', None)
if not os.path.isfile(template_file):
logging.info("{} is not actually a file".format(template_file))
return False
logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key))
temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
with open(temp_file_name, 'w') as dump_file:
json.dump(self._parameters, dump_file, indent=4)
self._s3.upload_file(temp_file_name, bucket, propertyfile_key)
logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key))
self._s3.upload_file(template_file, bucket, stackfile_key)
self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key)
logging.info("template_url: " + self._templateUrl)
return True
except Exception as x:
logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))
traceback.print_exc(file=sys.stdout)
return False
def _craft_s3_keys(self):
"""
We are putting stuff into S3, were supplied the bucket. Here we
craft the key of the elements we are putting up there in the
internet clouds.
Args:
None
Returns:
a tuple of teplate file key and property file key
"""
now = time.gmtime()
stub = "templates/{stack_name}/{version}".format(
stack_name=self._config.get('environment', {}).get('stack_name', None),
version=self._config.get('codeVersion')
)
stub = stub + "/" + str(now.tm_year)
stub = stub + "/" + str('%02d' % now.tm_mon)
stub = stub + "/" + str('%02d' % now.tm_mday)
stub = stub + "/" + str('%02d' % now.tm_hour)
stub = stub + ":" + str('%02d' % now.tm_min)
stub = stub + ":" + str('%02d' % now.tm_sec)
if self._yaml:
template_key = stub + "/stack.yaml"
else:
template_key = stub + "/stack.json"
property_key = stub + "/stack.properties"
return template_key, property_key
def poll_stack(self):
"""
Spin in a loop while the Cloud Formation process either fails or succeeds
Args:
None
Returns:
Good or bad; True or False
"""
logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
time.sleep(POLL_INTERVAL)
completed_states = [
'CREATE_COMPLETE',
'UPDATE_COMPLETE',
'DELETE_COMPLETE'
]
stack_name = self._config.get('environment', {}).get('stack_name', None)
while True:
try:
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
current_status = stack['StackStatus']
logging.info('current status of {}: {}'.format(stack_name, current_status))
if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
if current_status in completed_states:
return True
else:
return False
time.sleep(POLL_INTERVAL)
except ClientError as wtf:
if str(wtf).find('does not exist') == -1:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
else:
logging.info('{} is gone'.format(stack_name))
return True
except Exception as wtf:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _initialize_list(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _initialize_smash(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _validate_ini_data(self):
if 'stack_name' not in self._config.get('environment', {}):
return False
elif 'bucket' not in self._config.get('environment', {}):
return False
elif 'template' not in self._config.get('environment', {}):
return False
else:
template_file = self._config.get('environment', {}).get('template', None)
if os.path.isfile(template_file):
return True
else:
logging.error('template file \'{}\' does not exist, I give up!'.format(template_file))
return False
def _initialize_upsert(self):
if not self._validate_ini_data():
logging.error('INI file missing required bits; bucket and/or template and/or stack_name')
raise SystemError
elif not self._render_template():
logging.error('template rendering failed')
raise SystemError
elif not self._load_template():
logging.error('template initialization was not good')
raise SystemError
elif not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
elif not self._fill_parameters():
logging.error('parameter setup was not good')
raise SystemError
elif not self._read_tags():
logging.error('tags initialization was not good')
raise SystemError
elif not self._archive_elements():
logging.error('saving stuff to S3 did not go well')
raise SystemError
elif not self._set_update():
logging.error('there was a problem determining update or create')
raise SystemError
def _analyze_stuff(self):
template_scanner = self._config.get('analysis', {}).get('template', None)
tags_scanner = self._config.get('analysis', {}).get('tags', None)
if template_scanner or tags_scanner:
r = self._externally_analyze_stuff(template_scanner, tags_scanner)
if not r:
return False
wrk = self._config.get('analysis', {}).get('enforced', 'crap').lower()
rule_exceptions = self._config.get('analysis', {}).get('exceptions', None)
if wrk == 'true' or wrk == 'false':
enforced = wrk == 'true'
self._internally_analyze_stuff(enforced, rule_exceptions)
return True
def _externally_analyze_stuff(self, template_scanner, tags_scanner):
scans_executed = False
tags_scan_status = 0
template_scan_status = 0
the_data = None
try:
if template_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
template_scan_status = answer.get('exit_status', -2)
print('\nTemplate scan:')
print(json.dumps(answer, indent=2))
if tags_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
tags_scan_status = answer.get('exit_status', -2)
print('\nTag scan:')
print(json.dumps(answer, indent=2))
if not scans_executed:
return True
elif template_scan_status == 0 and tags_scan_status == 0:
print('All scans successful')
return True
else:
print('Failed scans')
return False
except Exception as wtf:
print('')
logging.info('template_scanner: {}'.format(template_scanner))
logging.info(' tags_scanner: {}'.format(tags_scanner))
print('')
logging.error('Exception caught in analyze_stuff(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _internally_analyze_stuff(self, enforced, rule_exceptions):
try:
config_dict = {}
config_dict['template_file'] = self._config['environment']['template']
validator = ValidateUtility(config_dict)
_results = validator.validate()
results = json.loads(_results)
for result in results:
try:
error_count = int(result.get('failure_count', 0))
except Exception as strangeness:
logging.warn('internally_analyze_stuff() strangeness: {}'.format(strangeness))
error_count = -1
if enforced:
traceback.print_exc(file=sys.stdout)
sys.exit(1)
if error_count == 0:
logging.info('CloudFormation Validator found zero errors')
elif error_count == 1:
if enforced:
logging.error('CloudFormation Validator found one error')
sys.exit(1)
else:
logging.warn('CloudFormation Validator found one error')
elif error_count > 1:
if enforced:
logging.error(
'CloudFormation Validator found {} errors'.format(error_count)
)
sys.exit(1)
else:
logging.warn(
'CloudFormation Validator found {} errors'.format(error_count)
)
except Exception as ruh_roh_shaggy:
logging.error('internally_analyze_stuff() exploded: {}'.format(ruh_roh_shaggy))
traceback.print_exc(file=sys.stdout)
if enforced:
sys.exit(1)
return True
    def get_cloud_formation_client(self):
        """Return the boto3 CloudFormation client held by this utility."""
        return self._cloudFormation
|
muckamuck/stackility | stackility/CloudStackUtility.py | CloudStackUtility._init_boto3_clients | python | def _init_boto3_clients(self):
try:
profile = self._config.get('environment', {}).get('profile')
region = self._config.get('environment', {}).get('region')
if profile:
self._b3Sess = boto3.session.Session(profile_name=profile)
else:
self._b3Sess = boto3.session.Session()
self._s3 = self._b3Sess.client('s3')
self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
self._ssm = self._b3Sess.client('ssm', region_name=region)
return True
except Exception as wtf:
logging.error('Exception caught in intialize_session(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
            return False | The utility requires boto3 clients to Cloud Formation and S3. Here is
where we make them.
Args:
None
Returns:
Good or Bad; True or False | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L390-L417 | null | class CloudStackUtility:
"""
Cloud stack utility is yet another tool create AWS Cloudformation stacks.
"""
ASK = '[ask]'
SSM = '[ssm:'
_verbose = False
_template = None
_b3Sess = None
_cloudFormation = None
_config = None
_parameters = {}
_stackParameters = []
_s3 = None
_ssm = None
_tags = []
_templateUrl = None
_updateStack = False
_yaml = False
def __init__(self, config_block):
"""
Cloud stack utility init method.
Args:
config_block - a dictionary creates from the CLI driver. See that
script for the things that are required and
optional.
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
"""
if config_block:
self._config = config_block
else:
logging.error('config block was garbage')
raise SystemError
    def upsert(self):
        """
        The main event of the utility: create or update a Cloud Formation
        stack, injecting parameters where needed.

        Args:
            None

        Returns:
            True if the stack create/update is started successfully, else
            False if the start goes off in the weeds.

        Exits:
            If the user asked for a dryrun, exit(0) here - there is no point
            continuing after the change set has been described.
        """
        required_parameters = []
        self._stackParameters = []
        try:
            self._initialize_upsert()
        except Exception:
            # _initialize_upsert() raises SystemError after logging the
            # specific failure, so no further detail is available here.
            return False
        try:
            available_parameters = self._parameters.keys()
            # Every parameter declared by the template must be supplied.
            for parameter_name in self._template.get('Parameters', {}):
                required_parameters.append(str(parameter_name))
            logging.info(' required parameters: ' + str(required_parameters))
            logging.info('available parameters: ' + str(available_parameters))
            parameters = []
            for required_parameter in required_parameters:
                parameter = {}
                parameter['ParameterKey'] = str(required_parameter)
                required_parameter = str(required_parameter)
                if required_parameter in self._parameters:
                    parameter['ParameterValue'] = self._parameters[required_parameter]
                else:
                    # Fall back to a lower-cased key; a KeyError here is
                    # caught by the outer except and fails the upsert.
                    parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
                parameters.append(parameter)
            if not self._analyze_stuff():
                sys.exit(1)
            if self._config.get('dryrun', False):
                logging.info('Generating change set')
                set_id = self._generate_change_set(parameters)
                if set_id:
                    self._describe_change_set(set_id)
                logging.info('This was a dryrun')
                sys.exit(0)
            self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
            self._tags.append({"Key": "ANSWER", "Value": str(42)})
            if self._updateStack:
                # _set_update() decided the stack already exists.
                stack = self._cloudFormation.update_stack(
                    StackName=self._config.get('environment', {}).get('stack_name', None),
                    TemplateURL=self._templateUrl,
                    Parameters=parameters,
                    Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                    Tags=self._tags,
                    ClientRequestToken=str(uuid.uuid4())
                )
                logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
            else:
                stack = self._cloudFormation.create_stack(
                    StackName=self._config.get('environment', {}).get('stack_name', None),
                    TemplateURL=self._templateUrl,
                    Parameters=parameters,
                    Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                    Tags=self._tags,
                    ClientRequestToken=str(uuid.uuid4())
                )
                logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
        except Exception as x:
            if self._verbose:
                logging.error(x, exc_info=True)
            else:
                logging.error(x, exc_info=False)
            return False
        return True
    def _describe_change_set(self, set_id):
        """
        Poll the given change set until it settles, print a report of its
        changes, then delete it.

        Args:
            set_id - the change set ID/ARN returned by create_change_set()

        Returns:
            True on success, False on any exception.
        """
        complete_states = ['CREATE_COMPLETE', 'FAILED', 'UNKNOWN']
        try:
            logging.info('polling change set, POLL_INTERVAL={}'.format(POLL_INTERVAL))
            response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
            status = response.get('Status', 'UNKNOWN')
            while status not in complete_states:
                logging.info('current set status: {}'.format(status))
                time.sleep(POLL_INTERVAL)
                response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
                status = response.get('Status', 'UNKNOWN')
            logging.info('current set status: {}'.format(status))
            print('\n')
            print('Change set report:')
            for change in response.get('Changes', []):
                print(
                    json.dumps(
                        change,
                        indent=2,
                        default=json_util.default
                    )
                )
            print('\n')
            logging.info('cleaning up change set')
            self._cloudFormation.delete_change_set(ChangeSetName=set_id)
            return True
        except Exception as ruh_roh_shaggy:
            if self._verbose:
                logging.error(ruh_roh_shaggy, exc_info=True)
            else:
                logging.error(ruh_roh_shaggy, exc_info=False)
            return False
    def _generate_change_set(self, parameters):
        """
        Create a CloudFormation change set (UPDATE for an existing stack,
        CREATE otherwise) for a dryrun.

        Args:
            parameters - list of {'ParameterKey': ..., 'ParameterValue': ...}

        Returns:
            the change set Id, or None on any exception.
        """
        try:
            self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
            self._tags.append({"Key": "ANSWER", "Value": str(42)})
            # Change set names only need to be unique per stack; a second
            # resolution timestamp is good enough here.
            set_name = 'chg{}'.format(int(time.time()))
            if self._updateStack:
                changes = self._cloudFormation.create_change_set(
                    StackName=self._config.get('environment', {}).get('stack_name', None),
                    TemplateURL=self._templateUrl,
                    Parameters=parameters,
                    Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                    Tags=self._tags,
                    ChangeSetName=set_name,
                    ChangeSetType='UPDATE'
                )
            else:
                changes = self._cloudFormation.create_change_set(
                    StackName=self._config.get('environment', {}).get('stack_name', None),
                    TemplateURL=self._templateUrl,
                    Parameters=parameters,
                    Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                    Tags=self._tags,
                    ChangeSetName=set_name,
                    ChangeSetType='CREATE'
                )
            if self._verbose:
                logging.info('Change set: {}'.format(
                    json.dumps(changes, indent=2, default=json_util.default)
                ))
            return changes.get('Id', None)
        except Exception as ruh_roh_shaggy:
            if self._verbose:
                logging.error(ruh_roh_shaggy, exc_info=True)
            else:
                logging.error(ruh_roh_shaggy, exc_info=False)
            return None
    def _render_template(self):
        """
        If 'meta-parameters' are configured, render the template through
        Jinja2 with those values and point the config at the rendered copy.

        Returns:
            True when there is nothing to render, otherwise the rendered
            text. NOTE(review): a template that renders to an empty string
            is falsy, and _initialize_upsert() treats a falsy return as a
            failure - confirm that is intended.

        Exits:
            exit(1) on any rendering exception.
        """
        buf = None
        try:
            context = self._config.get('meta-parameters', None)
            if not context:
                return True
            template_file = self._config.get('environment', {}).get('template', None)
            path, filename = os.path.split(template_file)
            env = jinja2.Environment(
                loader=jinja2.FileSystemLoader(path or './')
            )
            buf = env.get_template(filename).render(context)
            # delete=False: the rendered file must outlive this method; the
            # rest of the run reads the template from this path.
            with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr', delete=False) as tmp:
                tmp.write(buf)
                logging.info('template rendered into {}'.format(tmp.name))
                self._config['environment']['template'] = tmp.name
        except Exception as wtf:
            print('error: _render_template() caught {}'.format(wtf))
            sys.exit(1)
        return buf
    def _load_template(self):
        """
        Read the working template file, trying JSON first and then YAML.

        Side effects:
            self._template holds the parsed document and self._yaml records
            which format was detected.

        Returns:
            True if the file parsed and contains a 'Resources' section,
            False otherwise.
        """
        template_decoded = False
        template_file = self._config.get('environment', {}).get('template', None)
        self._template = None
        try:
            # NOTE(review): this file handle is never closed; a 'with' block
            # would be safer.
            json_stuff = open(template_file)
            self._template = json.load(json_stuff)
            if self._template and 'Resources' in self._template:
                template_decoded = True
                self._yaml = False
                logging.info('template is JSON')
            else:
                logging.info('template is not a valid JSON template')
        except Exception as x:
            template_decoded = False
            logging.debug('Exception caught in load_template(json): {}'.format(x))
            logging.info('template is not JSON')
        if not template_decoded:
            try:
                # default_ctor (defined elsewhere in this module) is
                # registered as a catch-all for unrecognized YAML tags -
                # presumably CloudFormation short-form intrinsics like
                # !Ref / !Sub; confirm against the module header.
                yaml.add_multi_constructor('', default_ctor, Loader=Loader)
                with open(template_file, 'r') as f:
                    self._template = yaml.load(f, Loader=Loader)
                if self._template and 'Resources' in self._template:
                    template_decoded = True
                    self._yaml = True
                    logging.info('template is YAML')
                else:
                    logging.info('template is not a valid YAML template')
            except Exception as x:
                template_decoded = False
                logging.debug('Exception caught in load_template(yaml): {}'.format(x))
                logging.info('template is not YAML')
        return template_decoded
    def list(self):
        """
        List the existing stacks in the indicated region, skipping stacks
        already in DELETE_COMPLETE.

        Args:
            None

        Returns:
            True

        Todo:
            Figure out what could go wrong and take steps
            to handle problems.
        """
        self._initialize_list()
        interested = True
        response = self._cloudFormation.list_stacks()
        print('Stack(s):')
        # list_stacks() is paginated; keep following NextToken.
        while interested:
            if 'StackSummaries' in response:
                for stack in response['StackSummaries']:
                    stack_status = stack['StackStatus']
                    if stack_status != 'DELETE_COMPLETE':
                        print('    [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
            next_token = response.get('NextToken', None)
            if next_token:
                response = self._cloudFormation.list_stacks(NextToken=next_token)
            else:
                interested = False
        return True
    def smash(self):
        """
        Smash (delete) the configured stack and wait for the delete to
        finish.

        Args:
            None

        Returns:
            True if the delete completes, False if the stack cannot be
            found or the delete fails.

        Todo:
            Figure out what could go wrong and take steps
            to handle problems.
        """
        self._initialize_smash()
        try:
            stack_name = self._config.get('environment', {}).get('stack_name', None)
            # Pre-flight: describe_stacks() raises if the stack is missing.
            response = self._cloudFormation.describe_stacks(StackName=stack_name)
            logging.debug('smash pre-flight returned: {}'.format(
                json.dumps(response,
                           indent=4,
                           default=json_util.default
                           )))
        except ClientError as wtf:
            logging.warning('your stack is in another castle [0].')
            return False
        except Exception as wtf:
            logging.error('failed to find intial status of smash candidate: {}'.format(wtf))
            return False
        response = self._cloudFormation.delete_stack(StackName=stack_name)
        logging.info('delete started for stack: {}'.format(stack_name))
        logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))
        return self.poll_stack()
def _fill_defaults(self):
try:
parms = self._template['Parameters']
for key in parms:
key = str(key)
if 'Default' in parms[key] and key not in self._parameters:
self._parameters[key] = str(parms[key]['Default'])
except Exception as wtf:
logging.error('Exception caught in fill_defaults(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
return True
def _get_ssm_parameter(self, p):
"""
Get parameters from Simple Systems Manager
Args:
p - a parameter name
Returns:
a value, decrypted if needed, if successful or None if things go
sideways.
"""
try:
response = self._ssm.get_parameter(Name=p, WithDecryption=True)
return response.get('Parameter', {}).get('Value', None)
except Exception as ruh_roh:
logging.error(ruh_roh, exc_info=False)
return None
def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True
def _read_tags(self):
"""
Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
Figure what could go wrong and at least acknowledge the
the fact that Murphy was an optimist.
"""
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True
def _set_update(self):
"""
Determine if we are creating a new stack or updating and existing one.
The update member is set as you would expect at the end of this query.
Args:
None
Returns:
True
"""
try:
self._updateStack = False
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
if stack['StackStatus'] == 'ROLLBACK_COMPLETE':
logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')
del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))
stack_delete = self.poll_stack()
if not stack_delete:
return False
if stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']:
self._updateStack = True
except:
self._updateStack = False
logging.info('update_stack: ' + str(self._updateStack))
return True
    def _archive_elements(self):
        """
        Cloud Formation likes to take the template from S3 so here we put the
        template into S3. We also store the parameters file that was used in
        this run. Note: you can pass anything as the version string but you
        should at least consider a version control tag or git commit hash as
        the version.

        Args:
            None

        Returns:
            True if the stuff lands in S3 or False if the file doesn't
            really exist or the upload goes sideways.
        """
        try:
            stackfile_key, propertyfile_key = self._craft_s3_keys()
            template_file = self._config.get('environment', {}).get('template', None)
            bucket = self._config.get('environment', {}).get('bucket', None)
            if not os.path.isfile(template_file):
                logging.info("{} is not actually a file".format(template_file))
                return False
            logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key))
            # NOTE(review): hard-coded /tmp is not portable and the temp
            # file is never removed; tempfile.NamedTemporaryFile would be
            # safer.
            temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
            with open(temp_file_name, 'w') as dump_file:
                json.dump(self._parameters, dump_file, indent=4)
            self._s3.upload_file(temp_file_name, bucket, propertyfile_key)
            logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key))
            self._s3.upload_file(template_file, bucket, stackfile_key)
            self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key)
            logging.info("template_url: " + self._templateUrl)
            return True
        except Exception as x:
            logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))
            traceback.print_exc(file=sys.stdout)
            return False
def _craft_s3_keys(self):
"""
We are putting stuff into S3, were supplied the bucket. Here we
craft the key of the elements we are putting up there in the
internet clouds.
Args:
None
Returns:
a tuple of teplate file key and property file key
"""
now = time.gmtime()
stub = "templates/{stack_name}/{version}".format(
stack_name=self._config.get('environment', {}).get('stack_name', None),
version=self._config.get('codeVersion')
)
stub = stub + "/" + str(now.tm_year)
stub = stub + "/" + str('%02d' % now.tm_mon)
stub = stub + "/" + str('%02d' % now.tm_mday)
stub = stub + "/" + str('%02d' % now.tm_hour)
stub = stub + ":" + str('%02d' % now.tm_min)
stub = stub + ":" + str('%02d' % now.tm_sec)
if self._yaml:
template_key = stub + "/stack.yaml"
else:
template_key = stub + "/stack.json"
property_key = stub + "/stack.properties"
return template_key, property_key
    def poll_stack(self):
        """
        Spin in a loop while the Cloud Formation process either fails or
        succeeds.

        Args:
            None

        Returns:
            Good or bad; True or False. A stack that disappears ("does not
            exist") while polling counts as success - that is how a
            completed delete looks.
        """
        logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
        time.sleep(POLL_INTERVAL)
        completed_states = [
            'CREATE_COMPLETE',
            'UPDATE_COMPLETE',
            'DELETE_COMPLETE'
        ]
        stack_name = self._config.get('environment', {}).get('stack_name', None)
        while True:
            try:
                response = self._cloudFormation.describe_stacks(StackName=stack_name)
                stack = response['Stacks'][0]
                current_status = stack['StackStatus']
                logging.info('current status of {}: {}'.format(stack_name, current_status))
                # Any *COMPLETE or *FAILED status is terminal; only the
                # three happy-path states above count as success.
                if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
                    if current_status in completed_states:
                        return True
                    else:
                        return False
                time.sleep(POLL_INTERVAL)
            except ClientError as wtf:
                if str(wtf).find('does not exist') == -1:
                    logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
                    traceback.print_exc(file=sys.stdout)
                    return False
                else:
                    logging.info('{} is gone'.format(stack_name))
                    return True
            except Exception as wtf:
                logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
                traceback.print_exc(file=sys.stdout)
                return False
    def _initialize_list(self):
        """Set up boto3 clients for list(); raises SystemError on failure."""
        if not self._init_boto3_clients():
            logging.error('session initialization was not good')
            raise SystemError
    def _initialize_smash(self):
        """Set up boto3 clients for smash(); raises SystemError on failure."""
        if not self._init_boto3_clients():
            logging.error('session initialization was not good')
            raise SystemError
def _validate_ini_data(self):
if 'stack_name' not in self._config.get('environment', {}):
return False
elif 'bucket' not in self._config.get('environment', {}):
return False
elif 'template' not in self._config.get('environment', {}):
return False
else:
template_file = self._config.get('environment', {}).get('template', None)
if os.path.isfile(template_file):
return True
else:
logging.error('template file \'{}\' does not exist, I give up!'.format(template_file))
return False
    def _initialize_upsert(self):
        """
        Run the full pre-flight chain for upsert(): validate config, render
        and load the template, create boto3 clients, resolve parameters and
        tags, archive artifacts to S3, and decide create-vs-update.

        Raises:
            SystemError - on the first step that fails (the specific
            failure is logged before raising).
        """
        if not self._validate_ini_data():
            logging.error('INI file missing required bits; bucket and/or template and/or stack_name')
            raise SystemError
        elif not self._render_template():
            logging.error('template rendering failed')
            raise SystemError
        elif not self._load_template():
            logging.error('template initialization was not good')
            raise SystemError
        elif not self._init_boto3_clients():
            logging.error('session initialization was not good')
            raise SystemError
        elif not self._fill_parameters():
            logging.error('parameter setup was not good')
            raise SystemError
        elif not self._read_tags():
            logging.error('tags initialization was not good')
            raise SystemError
        elif not self._archive_elements():
            logging.error('saving stuff to S3 did not go well')
            raise SystemError
        elif not self._set_update():
            logging.error('there was a problem determining update or create')
            raise SystemError
def _analyze_stuff(self):
template_scanner = self._config.get('analysis', {}).get('template', None)
tags_scanner = self._config.get('analysis', {}).get('tags', None)
if template_scanner or tags_scanner:
r = self._externally_analyze_stuff(template_scanner, tags_scanner)
if not r:
return False
wrk = self._config.get('analysis', {}).get('enforced', 'crap').lower()
rule_exceptions = self._config.get('analysis', {}).get('exceptions', None)
if wrk == 'true' or wrk == 'false':
enforced = wrk == 'true'
self._internally_analyze_stuff(enforced, rule_exceptions)
return True
def _externally_analyze_stuff(self, template_scanner, tags_scanner):
scans_executed = False
tags_scan_status = 0
template_scan_status = 0
the_data = None
try:
if template_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
template_scan_status = answer.get('exit_status', -2)
print('\nTemplate scan:')
print(json.dumps(answer, indent=2))
if tags_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
tags_scan_status = answer.get('exit_status', -2)
print('\nTag scan:')
print(json.dumps(answer, indent=2))
if not scans_executed:
return True
elif template_scan_status == 0 and tags_scan_status == 0:
print('All scans successful')
return True
else:
print('Failed scans')
return False
except Exception as wtf:
print('')
logging.info('template_scanner: {}'.format(template_scanner))
logging.info(' tags_scanner: {}'.format(tags_scanner))
print('')
logging.error('Exception caught in analyze_stuff(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _internally_analyze_stuff(self, enforced, rule_exceptions):
try:
config_dict = {}
config_dict['template_file'] = self._config['environment']['template']
validator = ValidateUtility(config_dict)
_results = validator.validate()
results = json.loads(_results)
for result in results:
try:
error_count = int(result.get('failure_count', 0))
except Exception as strangeness:
logging.warn('internally_analyze_stuff() strangeness: {}'.format(strangeness))
error_count = -1
if enforced:
traceback.print_exc(file=sys.stdout)
sys.exit(1)
if error_count == 0:
logging.info('CloudFormation Validator found zero errors')
elif error_count == 1:
if enforced:
logging.error('CloudFormation Validator found one error')
sys.exit(1)
else:
logging.warn('CloudFormation Validator found one error')
elif error_count > 1:
if enforced:
logging.error(
'CloudFormation Validator found {} errors'.format(error_count)
)
sys.exit(1)
else:
logging.warn(
'CloudFormation Validator found {} errors'.format(error_count)
)
except Exception as ruh_roh_shaggy:
logging.error('internally_analyze_stuff() exploded: {}'.format(ruh_roh_shaggy))
traceback.print_exc(file=sys.stdout)
if enforced:
sys.exit(1)
return True
    def get_cloud_formation_client(self):
        """Return the boto3 CloudFormation client held by this utility."""
        return self._cloudFormation
|
muckamuck/stackility | stackility/CloudStackUtility.py | CloudStackUtility._get_ssm_parameter | python | def _get_ssm_parameter(self, p):
try:
response = self._ssm.get_parameter(Name=p, WithDecryption=True)
return response.get('Parameter', {}).get('Value', None)
except Exception as ruh_roh:
logging.error(ruh_roh, exc_info=False)
return None | Get parameters from Simple Systems Manager
Args:
p - a parameter name
Returns:
a value, decrypted if needed, if successful or None if things go
sideways. | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L434-L451 | null | class CloudStackUtility:
"""
Cloud stack utility is yet another tool create AWS Cloudformation stacks.
"""
ASK = '[ask]'
SSM = '[ssm:'
_verbose = False
_template = None
_b3Sess = None
_cloudFormation = None
_config = None
_parameters = {}
_stackParameters = []
_s3 = None
_ssm = None
_tags = []
_templateUrl = None
_updateStack = False
_yaml = False
def __init__(self, config_block):
"""
Cloud stack utility init method.
Args:
config_block - a dictionary creates from the CLI driver. See that
script for the things that are required and
optional.
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
"""
if config_block:
self._config = config_block
else:
logging.error('config block was garbage')
raise SystemError
def upsert(self):
"""
The main event of the utility. Create or update a Cloud Formation
stack. Injecting properties where needed
Args:
None
Returns:
True if the stack create/update is started successfully else
False if the start goes off in the weeds.
Exits:
If the user asked for a dryrun exit(with a code 0) the thing here. There is no
point continuing after that point.
"""
required_parameters = []
self._stackParameters = []
try:
self._initialize_upsert()
except Exception:
return False
try:
available_parameters = self._parameters.keys()
for parameter_name in self._template.get('Parameters', {}):
required_parameters.append(str(parameter_name))
logging.info(' required parameters: ' + str(required_parameters))
logging.info('available parameters: ' + str(available_parameters))
parameters = []
for required_parameter in required_parameters:
parameter = {}
parameter['ParameterKey'] = str(required_parameter)
required_parameter = str(required_parameter)
if required_parameter in self._parameters:
parameter['ParameterValue'] = self._parameters[required_parameter]
else:
parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
parameters.append(parameter)
if not self._analyze_stuff():
sys.exit(1)
if self._config.get('dryrun', False):
logging.info('Generating change set')
set_id = self._generate_change_set(parameters)
if set_id:
self._describe_change_set(set_id)
logging.info('This was a dryrun')
sys.exit(0)
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
if self._updateStack:
stack = self._cloudFormation.update_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
else:
stack = self._cloudFormation.create_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
except Exception as x:
if self._verbose:
logging.error(x, exc_info=True)
else:
logging.error(x, exc_info=False)
return False
return True
def _describe_change_set(self, set_id):
complete_states = ['CREATE_COMPLETE', 'FAILED', 'UNKNOWN']
try:
logging.info('polling change set, POLL_INTERVAL={}'.format(POLL_INTERVAL))
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
while status not in complete_states:
logging.info('current set status: {}'.format(status))
time.sleep(POLL_INTERVAL)
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
logging.info('current set status: {}'.format(status))
print('\n')
print('Change set report:')
for change in response.get('Changes', []):
print(
json.dumps(
change,
indent=2,
default=json_util.default
)
)
print('\n')
logging.info('cleaning up change set')
self._cloudFormation.delete_change_set(ChangeSetName=set_id)
return True
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return False
def _generate_change_set(self, parameters):
try:
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
set_name = 'chg{}'.format(int(time.time()))
if self._updateStack:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='UPDATE'
)
else:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='CREATE'
)
if self._verbose:
logging.info('Change set: {}'.format(
json.dumps(changes, indent=2, default=json_util.default)
))
return changes.get('Id', None)
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return None
def _render_template(self):
buf = None
try:
context = self._config.get('meta-parameters', None)
if not context:
return True
template_file = self._config.get('environment', {}).get('template', None)
path, filename = os.path.split(template_file)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(path or './')
)
buf = env.get_template(filename).render(context)
with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr', delete=False) as tmp:
tmp.write(buf)
logging.info('template rendered into {}'.format(tmp.name))
self._config['environment']['template'] = tmp.name
except Exception as wtf:
print('error: _render_template() caught {}'.format(wtf))
sys.exit(1)
return buf
def _load_template(self):
template_decoded = False
template_file = self._config.get('environment', {}).get('template', None)
self._template = None
try:
json_stuff = open(template_file)
self._template = json.load(json_stuff)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = False
logging.info('template is JSON')
else:
logging.info('template is not a valid JSON template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(json): {}'.format(x))
logging.info('template is not JSON')
if not template_decoded:
try:
yaml.add_multi_constructor('', default_ctor, Loader=Loader)
with open(template_file, 'r') as f:
self._template = yaml.load(f, Loader=Loader)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = True
logging.info('template is YAML')
else:
logging.info('template is not a valid YAML template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(yaml): {}'.format(x))
logging.info('template is not YAML')
return template_decoded
def list(self):
"""
List the existing stacks in the indicated region
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if 'StackSummaries' in response:
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if stack_status != 'DELETE_COMPLETE':
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token)
else:
interested = False
return True
def smash(self):
"""
Smash the given stack
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_smash()
try:
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
logging.debug('smash pre-flight returned: {}'.format(
json.dumps(response,
indent=4,
default=json_util.default
)))
except ClientError as wtf:
logging.warning('your stack is in another castle [0].')
return False
except Exception as wtf:
logging.error('failed to find intial status of smash candidate: {}'.format(wtf))
return False
response = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))
return self.poll_stack()
def _init_boto3_clients(self):
    """
    Create the boto3 session plus the S3, CloudFormation and SSM clients
    the utility needs, honoring the optional profile/region settings from
    the 'environment' section of the config.

    Returns:
        Good or Bad; True or False
    """
    try:
        profile = self._config.get('environment', {}).get('profile')
        region = self._config.get('environment', {}).get('region')
        # An explicit profile selects a named credential set; otherwise
        # boto3 falls back to its default credential resolution chain.
        if profile:
            self._b3Sess = boto3.session.Session(profile_name=profile)
        else:
            self._b3Sess = boto3.session.Session()
        self._s3 = self._b3Sess.client('s3')
        self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
        self._ssm = self._b3Sess.client('ssm', region_name=region)
        return True
    except Exception as wtf:
        logging.error('Exception caught in intialize_session(): {}'.format(wtf))
        traceback.print_exc(file=sys.stdout)
        return False
def _fill_defaults(self):
try:
parms = self._template['Parameters']
for key in parms:
key = str(key)
if 'Default' in parms[key] and key not in self._parameters:
self._parameters[key] = str(parms[key]['Default'])
except Exception as wtf:
logging.error('Exception caught in fill_defaults(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
return True
def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True
def _read_tags(self):
"""
Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
Figure what could go wrong and at least acknowledge the
the fact that Murphy was an optimist.
"""
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True
def _set_update(self):
    """
    Determine if we are creating a new stack or updating an existing one.
    The update member is set as you would expect at the end of this query.

    Side effects:
        Sets self._updateStack; may delete a stack stuck in
        ROLLBACK_COMPLETE (such a stack cannot be updated, only replaced).

    Returns:
        True, or False if a required ROLLBACK_COMPLETE cleanup delete failed.
    """
    try:
        self._updateStack = False
        stack_name = self._config.get('environment', {}).get('stack_name', None)
        response = self._cloudFormation.describe_stacks(StackName=stack_name)
        stack = response['Stacks'][0]
        if stack['StackStatus'] == 'ROLLBACK_COMPLETE':
            # A ROLLBACK_COMPLETE stack can never be updated; delete it so
            # the subsequent operation becomes a clean create.
            logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')
            del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)
            logging.info('delete started for stack: {}'.format(stack_name))
            logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))
            stack_delete = self.poll_stack()
            if not stack_delete:
                return False
        if stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']:
            self._updateStack = True
    except:
        # describe_stacks throws when the stack does not exist yet; that
        # simply means this run is a create, not an update.
        self._updateStack = False
    logging.info('update_stack: ' + str(self._updateStack))
    return True
def _archive_elements(self):
    """
    Cloud Formation likes to take the template from S3 so here we put the
    template into S3. We also store the parameters file that was used in
    this run. Note: you can pass anything as the version string but you
    should at least consider a version control tag or git commit hash as
    the version.

    Side effects:
        Sets self._templateUrl to the uploaded template's S3 URL.

    Returns:
        True if the stuff lands in S3 or False if the file doesn't
        really exist or the upload goes sideways.
    """
    try:
        stackfile_key, propertyfile_key = self._craft_s3_keys()
        template_file = self._config.get('environment', {}).get('template', None)
        bucket = self._config.get('environment', {}).get('bucket', None)
        if not os.path.isfile(template_file):
            logging.info("{} is not actually a file".format(template_file))
            return False
        # Snapshot the resolved parameters next to the template so the
        # exact inputs of this run can be reconstructed later.
        logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key))
        temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
        with open(temp_file_name, 'w') as dump_file:
            json.dump(self._parameters, dump_file, indent=4)
        self._s3.upload_file(temp_file_name, bucket, propertyfile_key)
        logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key))
        self._s3.upload_file(template_file, bucket, stackfile_key)
        self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key)
        logging.info("template_url: " + self._templateUrl)
        return True
    except Exception as x:
        logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))
        traceback.print_exc(file=sys.stdout)
        return False
def _craft_s3_keys(self):
"""
We are putting stuff into S3, were supplied the bucket. Here we
craft the key of the elements we are putting up there in the
internet clouds.
Args:
None
Returns:
a tuple of teplate file key and property file key
"""
now = time.gmtime()
stub = "templates/{stack_name}/{version}".format(
stack_name=self._config.get('environment', {}).get('stack_name', None),
version=self._config.get('codeVersion')
)
stub = stub + "/" + str(now.tm_year)
stub = stub + "/" + str('%02d' % now.tm_mon)
stub = stub + "/" + str('%02d' % now.tm_mday)
stub = stub + "/" + str('%02d' % now.tm_hour)
stub = stub + ":" + str('%02d' % now.tm_min)
stub = stub + ":" + str('%02d' % now.tm_sec)
if self._yaml:
template_key = stub + "/stack.yaml"
else:
template_key = stub + "/stack.json"
property_key = stub + "/stack.properties"
return template_key, property_key
def poll_stack(self):
    """
    Spin in a loop until the Cloud Formation operation either fails or
    succeeds.

    Returns:
        Good or bad; True or False. A vanished stack (expected at the end
        of a delete) also counts as success.
    """
    logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
    time.sleep(POLL_INTERVAL)
    completed_states = [
        'CREATE_COMPLETE',
        'UPDATE_COMPLETE',
        'DELETE_COMPLETE'
    ]
    stack_name = self._config.get('environment', {}).get('stack_name', None)
    while True:
        try:
            response = self._cloudFormation.describe_stacks(StackName=stack_name)
            stack = response['Stacks'][0]
            current_status = stack['StackStatus']
            logging.info('current status of {}: {}'.format(stack_name, current_status))
            # Any *_COMPLETE or *_FAILED status is terminal; only the three
            # happy-path COMPLETE states above count as success.
            if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
                if current_status in completed_states:
                    return True
                else:
                    return False
            time.sleep(POLL_INTERVAL)
        except ClientError as wtf:
            if str(wtf).find('does not exist') == -1:
                logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
                traceback.print_exc(file=sys.stdout)
                return False
            else:
                # The stack is gone -- the expected end state of a delete.
                logging.info('{} is gone'.format(stack_name))
                return True
        except Exception as wtf:
            logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
            traceback.print_exc(file=sys.stdout)
            return False
def _initialize_list(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _initialize_smash(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _validate_ini_data(self):
if 'stack_name' not in self._config.get('environment', {}):
return False
elif 'bucket' not in self._config.get('environment', {}):
return False
elif 'template' not in self._config.get('environment', {}):
return False
else:
template_file = self._config.get('environment', {}).get('template', None)
if os.path.isfile(template_file):
return True
else:
logging.error('template file \'{}\' does not exist, I give up!'.format(template_file))
return False
def _initialize_upsert(self):
if not self._validate_ini_data():
logging.error('INI file missing required bits; bucket and/or template and/or stack_name')
raise SystemError
elif not self._render_template():
logging.error('template rendering failed')
raise SystemError
elif not self._load_template():
logging.error('template initialization was not good')
raise SystemError
elif not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
elif not self._fill_parameters():
logging.error('parameter setup was not good')
raise SystemError
elif not self._read_tags():
logging.error('tags initialization was not good')
raise SystemError
elif not self._archive_elements():
logging.error('saving stuff to S3 did not go well')
raise SystemError
elif not self._set_update():
logging.error('there was a problem determining update or create')
raise SystemError
def _analyze_stuff(self):
    """
    Run the configured template/tag scans: external HTTP endpoints from
    the 'analysis' config section and/or the bundled internal validator.

    Returns:
        True when scans pass or none are configured; False when an
        external scan fails.
    """
    template_scanner = self._config.get('analysis', {}).get('template', None)
    tags_scanner = self._config.get('analysis', {}).get('tags', None)
    if template_scanner or tags_scanner:
        r = self._externally_analyze_stuff(template_scanner, tags_scanner)
        if not r:
            return False
    # 'enforced' must be exactly 'true' or 'false' (case-insensitive) to
    # trigger the internal analysis at all; any other value skips it.
    wrk = self._config.get('analysis', {}).get('enforced', 'crap').lower()
    rule_exceptions = self._config.get('analysis', {}).get('exceptions', None)
    if wrk == 'true' or wrk == 'false':
        enforced = wrk == 'true'
        self._internally_analyze_stuff(enforced, rule_exceptions)
    return True
def _externally_analyze_stuff(self, template_scanner, tags_scanner):
    """
    POST the template to the configured external scan endpoints and
    report their verdicts.

    Args:
        template_scanner: URL of the template scanning service, or None.
        tags_scanner: URL of the tag scanning service, or None.

    Returns:
        True if no scans were configured or every executed scan reported
        exit_status 0; False otherwise.
    """
    scans_executed = False
    tags_scan_status = 0
    template_scan_status = 0
    the_data = None
    try:
        if template_scanner:
            scans_executed = True
            with open(self._config['environment']['template'], 'rb') as template_data:
                the_data = template_data.read()
            r = requests.post(template_scanner, data=the_data)
            answer = json.loads(r.content)
            template_scan_status = answer.get('exit_status', -2)
            print('\nTemplate scan:')
            print(json.dumps(answer, indent=2))
        if tags_scanner:
            scans_executed = True
            with open(self._config['environment']['template'], 'rb') as template_data:
                the_data = template_data.read()
            # BUG FIX: this branch previously POSTed to template_scanner,
            # so the tag scanning service was never actually called.
            # NOTE(review): the payload is still the raw template file --
            # confirm that is what the tag scanner expects.
            r = requests.post(tags_scanner, data=the_data)
            answer = json.loads(r.content)
            tags_scan_status = answer.get('exit_status', -2)
            print('\nTag scan:')
            print(json.dumps(answer, indent=2))
        if not scans_executed:
            return True
        elif template_scan_status == 0 and tags_scan_status == 0:
            print('All scans successful')
            return True
        else:
            print('Failed scans')
            return False
    except Exception as wtf:
        print('')
        logging.info('template_scanner: {}'.format(template_scanner))
        logging.info('    tags_scanner: {}'.format(tags_scanner))
        print('')
        logging.error('Exception caught in analyze_stuff(): {}'.format(wtf))
        traceback.print_exc(file=sys.stdout)
        return False
def _internally_analyze_stuff(self, enforced, rule_exceptions):
    """
    Run the bundled CloudFormation validator against the template and
    report (or, when enforced, exit the process on) any failures.

    Args:
        enforced: when True, validation errors terminate via sys.exit(1).
        rule_exceptions: not consumed by this method; accepted for
            interface compatibility with the caller.

    Returns:
        True (unless the process exits first).
    """
    try:
        config_dict = {}
        config_dict['template_file'] = self._config['environment']['template']
        validator = ValidateUtility(config_dict)
        _results = validator.validate()
        results = json.loads(_results)
        for result in results:
            try:
                error_count = int(result.get('failure_count', 0))
            except Exception as strangeness:
                # A malformed failure_count is itself fatal when enforced.
                logging.warn('internally_analyze_stuff() strangeness: {}'.format(strangeness))
                error_count = -1
                if enforced:
                    traceback.print_exc(file=sys.stdout)
                    sys.exit(1)
            if error_count == 0:
                logging.info('CloudFormation Validator found zero errors')
            elif error_count == 1:
                if enforced:
                    logging.error('CloudFormation Validator found one error')
                    sys.exit(1)
                else:
                    logging.warn('CloudFormation Validator found one error')
            elif error_count > 1:
                if enforced:
                    logging.error(
                        'CloudFormation Validator found {} errors'.format(error_count)
                    )
                    sys.exit(1)
                else:
                    logging.warn(
                        'CloudFormation Validator found {} errors'.format(error_count)
                    )
    except Exception as ruh_roh_shaggy:
        logging.error('internally_analyze_stuff() exploded: {}'.format(ruh_roh_shaggy))
        traceback.print_exc(file=sys.stdout)
        if enforced:
            sys.exit(1)
    return True
def get_cloud_formation_client(self):
    """Return the boto3 CloudFormation client (None before initialization)."""
    return self._cloudFormation
|
muckamuck/stackility | stackility/CloudStackUtility.py | CloudStackUtility._fill_parameters | python | def _fill_parameters(self):
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True | Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the
fact that Murphy was an optimist. | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L453-L498 | null | class CloudStackUtility:
"""
Cloud stack utility is yet another tool create AWS Cloudformation stacks.
"""
ASK = '[ask]'
SSM = '[ssm:'
_verbose = False
_template = None
_b3Sess = None
_cloudFormation = None
_config = None
_parameters = {}
_stackParameters = []
_s3 = None
_ssm = None
_tags = []
_templateUrl = None
_updateStack = False
_yaml = False
def __init__(self, config_block):
    """
    Cloud stack utility init method.

    Args:
        config_block - a dictionary created from the CLI driver. See that
                       script for the things that are required and
                       optional.

    Returns:
        not a damn thing

    Raises:
        SystemError - if everything isn't just right
    """
    if config_block:
        self._config = config_block
    else:
        logging.error('config block was garbage')
        raise SystemError
def upsert(self):
    """
    The main event of the utility. Create or update a Cloud Formation
    stack, injecting properties where needed.

    Returns:
        True if the stack create/update is started successfully else
        False if the start goes off in the weeds.

    Exits:
        If the user asked for a dryrun, exit (with code 0) right here.
        There is no point continuing after that point.
    """
    required_parameters = []
    self._stackParameters = []

    try:
        self._initialize_upsert()
    except Exception:
        # _initialize_upsert logs the specific failure before raising.
        return False

    try:
        available_parameters = self._parameters.keys()

        for parameter_name in self._template.get('Parameters', {}):
            required_parameters.append(str(parameter_name))

        logging.info(' required parameters: ' + str(required_parameters))
        logging.info('available parameters: ' + str(available_parameters))

        parameters = []
        for required_parameter in required_parameters:
            parameter = {}
            parameter['ParameterKey'] = str(required_parameter)
            required_parameter = str(required_parameter)
            if required_parameter in self._parameters:
                parameter['ParameterValue'] = self._parameters[required_parameter]
            else:
                # Fall back to a lower-cased property name before giving up.
                parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
            parameters.append(parameter)

        if not self._analyze_stuff():
            sys.exit(1)

        if self._config.get('dryrun', False):
            # Dry run: produce and describe a change set, then stop.
            logging.info('Generating change set')
            set_id = self._generate_change_set(parameters)
            if set_id:
                self._describe_change_set(set_id)
            logging.info('This was a dryrun')
            sys.exit(0)

        self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
        self._tags.append({"Key": "ANSWER", "Value": str(42)})
        if self._updateStack:
            stack = self._cloudFormation.update_stack(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ClientRequestToken=str(uuid.uuid4())
            )
            logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
        else:
            stack = self._cloudFormation.create_stack(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ClientRequestToken=str(uuid.uuid4())
            )
            logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
    except Exception as x:
        if self._verbose:
            logging.error(x, exc_info=True)
        else:
            logging.error(x, exc_info=False)
        return False
    return True
def _describe_change_set(self, set_id):
    """
    Poll the given change set until it reaches a terminal state, print a
    report of its changes, then delete the change set.

    Args:
        set_id: the change set id returned by create_change_set.

    Returns:
        True on success, False if anything blows up.
    """
    complete_states = ['CREATE_COMPLETE', 'FAILED', 'UNKNOWN']
    try:
        logging.info('polling change set, POLL_INTERVAL={}'.format(POLL_INTERVAL))
        response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
        status = response.get('Status', 'UNKNOWN')
        while status not in complete_states:
            logging.info('current set status: {}'.format(status))
            time.sleep(POLL_INTERVAL)
            response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
            status = response.get('Status', 'UNKNOWN')
        logging.info('current set status: {}'.format(status))
        print('\n')
        print('Change set report:')
        for change in response.get('Changes', []):
            print(
                json.dumps(
                    change,
                    indent=2,
                    default=json_util.default
                )
            )
        print('\n')
        # Dry runs never execute the change set, so clean it up here.
        logging.info('cleaning up change set')
        self._cloudFormation.delete_change_set(ChangeSetName=set_id)
        return True
    except Exception as ruh_roh_shaggy:
        if self._verbose:
            logging.error(ruh_roh_shaggy, exc_info=True)
        else:
            logging.error(ruh_roh_shaggy, exc_info=False)
        return False
def _generate_change_set(self, parameters):
    """
    Create a CloudFormation change set for a dry run, choosing UPDATE or
    CREATE flavor to match self._updateStack.

    Args:
        parameters: the [{'ParameterKey': ..., 'ParameterValue': ...}]
            list resolved by upsert().

    Returns:
        The change set Id, or None if creation failed.
    """
    try:
        self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
        self._tags.append({"Key": "ANSWER", "Value": str(42)})
        # The set name only has to be unique per run; epoch seconds will do.
        set_name = 'chg{}'.format(int(time.time()))
        if self._updateStack:
            changes = self._cloudFormation.create_change_set(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ChangeSetName=set_name,
                ChangeSetType='UPDATE'
            )
        else:
            changes = self._cloudFormation.create_change_set(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ChangeSetName=set_name,
                ChangeSetType='CREATE'
            )
        if self._verbose:
            logging.info('Change set: {}'.format(
                json.dumps(changes, indent=2, default=json_util.default)
            ))
        return changes.get('Id', None)
    except Exception as ruh_roh_shaggy:
        if self._verbose:
            logging.error(ruh_roh_shaggy, exc_info=True)
        else:
            logging.error(ruh_roh_shaggy, exc_info=False)
        return None
def _render_template(self):
    """
    When the config has a 'meta-parameters' section, run the template
    through Jinja2 with those values and point the config at the rendered
    temp file.

    Returns:
        True when there is nothing to render; otherwise the rendered
        template text. Callers only test truthiness.
        NOTE(review): a template that renders to an empty string yields a
        falsy return and is treated as failure upstream -- confirm that
        is intended.

    Exits:
        sys.exit(1) on any rendering error.
    """
    buf = None
    try:
        context = self._config.get('meta-parameters', None)
        if not context:
            return True
        template_file = self._config.get('environment', {}).get('template', None)
        path, filename = os.path.split(template_file)
        env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(path or './')
        )
        buf = env.get_template(filename).render(context)
        # Persist the rendered text and swap it in as the working template.
        with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr', delete=False) as tmp:
            tmp.write(buf)
            logging.info('template rendered into {}'.format(tmp.name))
            self._config['environment']['template'] = tmp.name
    except Exception as wtf:
        print('error: _render_template() caught {}'.format(wtf))
        sys.exit(1)
    return buf
def _load_template(self):
template_decoded = False
template_file = self._config.get('environment', {}).get('template', None)
self._template = None
try:
json_stuff = open(template_file)
self._template = json.load(json_stuff)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = False
logging.info('template is JSON')
else:
logging.info('template is not a valid JSON template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(json): {}'.format(x))
logging.info('template is not JSON')
if not template_decoded:
try:
yaml.add_multi_constructor('', default_ctor, Loader=Loader)
with open(template_file, 'r') as f:
self._template = yaml.load(f, Loader=Loader)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = True
logging.info('template is YAML')
else:
logging.info('template is not a valid YAML template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(yaml): {}'.format(x))
logging.info('template is not YAML')
return template_decoded
def list(self):
"""
List the existing stacks in the indicated region
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if 'StackSummaries' in response:
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if stack_status != 'DELETE_COMPLETE':
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token)
else:
interested = False
return True
def smash(self):
"""
Smash the given stack
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_smash()
try:
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
logging.debug('smash pre-flight returned: {}'.format(
json.dumps(response,
indent=4,
default=json_util.default
)))
except ClientError as wtf:
logging.warning('your stack is in another castle [0].')
return False
except Exception as wtf:
logging.error('failed to find intial status of smash candidate: {}'.format(wtf))
return False
response = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))
return self.poll_stack()
def _init_boto3_clients(self):
"""
The utililty requires boto3 clients to Cloud Formation and S3. Here is
where we make them.
Args:
None
Returns:
Good or Bad; True or False
"""
try:
profile = self._config.get('environment', {}).get('profile')
region = self._config.get('environment', {}).get('region')
if profile:
self._b3Sess = boto3.session.Session(profile_name=profile)
else:
self._b3Sess = boto3.session.Session()
self._s3 = self._b3Sess.client('s3')
self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
self._ssm = self._b3Sess.client('ssm', region_name=region)
return True
except Exception as wtf:
logging.error('Exception caught in intialize_session(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _fill_defaults(self):
try:
parms = self._template['Parameters']
for key in parms:
key = str(key)
if 'Default' in parms[key] and key not in self._parameters:
self._parameters[key] = str(parms[key]['Default'])
except Exception as wtf:
logging.error('Exception caught in fill_defaults(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
return True
def _get_ssm_parameter(self, p):
"""
Get parameters from Simple Systems Manager
Args:
p - a parameter name
Returns:
a value, decrypted if needed, if successful or None if things go
sideways.
"""
try:
response = self._ssm.get_parameter(Name=p, WithDecryption=True)
return response.get('Parameter', {}).get('Value', None)
except Exception as ruh_roh:
logging.error(ruh_roh, exc_info=False)
return None
def _read_tags(self):
"""
Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
Figure what could go wrong and at least acknowledge the
the fact that Murphy was an optimist.
"""
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True
def _set_update(self):
"""
Determine if we are creating a new stack or updating and existing one.
The update member is set as you would expect at the end of this query.
Args:
None
Returns:
True
"""
try:
self._updateStack = False
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
if stack['StackStatus'] == 'ROLLBACK_COMPLETE':
logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')
del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))
stack_delete = self.poll_stack()
if not stack_delete:
return False
if stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']:
self._updateStack = True
except:
self._updateStack = False
logging.info('update_stack: ' + str(self._updateStack))
return True
def _archive_elements(self):
"""
Cloud Formation likes to take the template from S3 so here we put the
template into S3. We also store the parameters file that was used in
this run. Note: you can pass anything as the version string but you
should at least consider a version control tag or git commit hash as
the version.
Args:
None
Returns:
True if the stuff lands in S3 or False if the file doesn't
really exist or the upload goes sideways.
"""
try:
stackfile_key, propertyfile_key = self._craft_s3_keys()
template_file = self._config.get('environment', {}).get('template', None)
bucket = self._config.get('environment', {}).get('bucket', None)
if not os.path.isfile(template_file):
logging.info("{} is not actually a file".format(template_file))
return False
logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key))
temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
with open(temp_file_name, 'w') as dump_file:
json.dump(self._parameters, dump_file, indent=4)
self._s3.upload_file(temp_file_name, bucket, propertyfile_key)
logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key))
self._s3.upload_file(template_file, bucket, stackfile_key)
self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key)
logging.info("template_url: " + self._templateUrl)
return True
except Exception as x:
logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))
traceback.print_exc(file=sys.stdout)
return False
def _craft_s3_keys(self):
"""
We are putting stuff into S3, were supplied the bucket. Here we
craft the key of the elements we are putting up there in the
internet clouds.
Args:
None
Returns:
a tuple of teplate file key and property file key
"""
now = time.gmtime()
stub = "templates/{stack_name}/{version}".format(
stack_name=self._config.get('environment', {}).get('stack_name', None),
version=self._config.get('codeVersion')
)
stub = stub + "/" + str(now.tm_year)
stub = stub + "/" + str('%02d' % now.tm_mon)
stub = stub + "/" + str('%02d' % now.tm_mday)
stub = stub + "/" + str('%02d' % now.tm_hour)
stub = stub + ":" + str('%02d' % now.tm_min)
stub = stub + ":" + str('%02d' % now.tm_sec)
if self._yaml:
template_key = stub + "/stack.yaml"
else:
template_key = stub + "/stack.json"
property_key = stub + "/stack.properties"
return template_key, property_key
def poll_stack(self):
"""
Spin in a loop while the Cloud Formation process either fails or succeeds
Args:
None
Returns:
Good or bad; True or False
"""
logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
time.sleep(POLL_INTERVAL)
completed_states = [
'CREATE_COMPLETE',
'UPDATE_COMPLETE',
'DELETE_COMPLETE'
]
stack_name = self._config.get('environment', {}).get('stack_name', None)
while True:
try:
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
current_status = stack['StackStatus']
logging.info('current status of {}: {}'.format(stack_name, current_status))
if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
if current_status in completed_states:
return True
else:
return False
time.sleep(POLL_INTERVAL)
except ClientError as wtf:
if str(wtf).find('does not exist') == -1:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
else:
logging.info('{} is gone'.format(stack_name))
return True
except Exception as wtf:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _initialize_list(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _initialize_smash(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _validate_ini_data(self):
if 'stack_name' not in self._config.get('environment', {}):
return False
elif 'bucket' not in self._config.get('environment', {}):
return False
elif 'template' not in self._config.get('environment', {}):
return False
else:
template_file = self._config.get('environment', {}).get('template', None)
if os.path.isfile(template_file):
return True
else:
logging.error('template file \'{}\' does not exist, I give up!'.format(template_file))
return False
def _initialize_upsert(self):
if not self._validate_ini_data():
logging.error('INI file missing required bits; bucket and/or template and/or stack_name')
raise SystemError
elif not self._render_template():
logging.error('template rendering failed')
raise SystemError
elif not self._load_template():
logging.error('template initialization was not good')
raise SystemError
elif not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
elif not self._fill_parameters():
logging.error('parameter setup was not good')
raise SystemError
elif not self._read_tags():
logging.error('tags initialization was not good')
raise SystemError
elif not self._archive_elements():
logging.error('saving stuff to S3 did not go well')
raise SystemError
elif not self._set_update():
logging.error('there was a problem determining update or create')
raise SystemError
def _analyze_stuff(self):
template_scanner = self._config.get('analysis', {}).get('template', None)
tags_scanner = self._config.get('analysis', {}).get('tags', None)
if template_scanner or tags_scanner:
r = self._externally_analyze_stuff(template_scanner, tags_scanner)
if not r:
return False
wrk = self._config.get('analysis', {}).get('enforced', 'crap').lower()
rule_exceptions = self._config.get('analysis', {}).get('exceptions', None)
if wrk == 'true' or wrk == 'false':
enforced = wrk == 'true'
self._internally_analyze_stuff(enforced, rule_exceptions)
return True
def _externally_analyze_stuff(self, template_scanner, tags_scanner):
scans_executed = False
tags_scan_status = 0
template_scan_status = 0
the_data = None
try:
if template_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
template_scan_status = answer.get('exit_status', -2)
print('\nTemplate scan:')
print(json.dumps(answer, indent=2))
if tags_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
tags_scan_status = answer.get('exit_status', -2)
print('\nTag scan:')
print(json.dumps(answer, indent=2))
if not scans_executed:
return True
elif template_scan_status == 0 and tags_scan_status == 0:
print('All scans successful')
return True
else:
print('Failed scans')
return False
except Exception as wtf:
print('')
logging.info('template_scanner: {}'.format(template_scanner))
logging.info(' tags_scanner: {}'.format(tags_scanner))
print('')
logging.error('Exception caught in analyze_stuff(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _internally_analyze_stuff(self, enforced, rule_exceptions):
    """
    Run the local CloudFormation validator over the configured template.

    Args:
        enforced - when True, any validation error (or any exception)
                   terminates the process via sys.exit(1)
        rule_exceptions - accepted for interface symmetry; not used here

    Returns:
        True (when it returns at all; enforced failures exit the process)
    """
    try:
        config_dict = {}
        config_dict['template_file'] = self._config['environment']['template']
        validator = ValidateUtility(config_dict)
        _results = validator.validate()
        results = json.loads(_results)
        for result in results:
            try:
                error_count = int(result.get('failure_count', 0))
            except Exception as strangeness:
                # validator output for this result could not be interpreted
                logging.warn('internally_analyze_stuff() strangeness: {}'.format(strangeness))
                error_count = -1
                # NOTE(review): this exit appears to belong to the except
                # branch (bail when enforced and the count is unreadable);
                # original indentation is ambiguous in this dump - confirm.
                if enforced:
                    traceback.print_exc(file=sys.stdout)
                    sys.exit(1)
            if error_count == 0:
                logging.info('CloudFormation Validator found zero errors')
            elif error_count == 1:
                if enforced:
                    logging.error('CloudFormation Validator found one error')
                    sys.exit(1)
                else:
                    logging.warn('CloudFormation Validator found one error')
            elif error_count > 1:
                if enforced:
                    logging.error(
                        'CloudFormation Validator found {} errors'.format(error_count)
                    )
                    sys.exit(1)
                else:
                    logging.warn(
                        'CloudFormation Validator found {} errors'.format(error_count)
                    )
    except Exception as ruh_roh_shaggy:
        logging.error('internally_analyze_stuff() exploded: {}'.format(ruh_roh_shaggy))
        traceback.print_exc(file=sys.stdout)
        if enforced:
            sys.exit(1)
    return True
def get_cloud_formation_client(self):
    """Expose the underlying boto3 CloudFormation client."""
    return self._cloudFormation
|
muckamuck/stackility | stackility/CloudStackUtility.py | CloudStackUtility._read_tags | python | def _read_tags(self):
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True | Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
Figure what could go wrong and at least acknowledge the
the fact that Murphy was an optimist. | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L500-L528 | null | class CloudStackUtility:
"""
Cloud stack utility is yet another tool create AWS Cloudformation stacks.
"""
ASK = '[ask]'
SSM = '[ssm:'
_verbose = False
_template = None
_b3Sess = None
_cloudFormation = None
_config = None
_parameters = {}
_stackParameters = []
_s3 = None
_ssm = None
_tags = []
_templateUrl = None
_updateStack = False
_yaml = False
def __init__(self, config_block):
"""
Cloud stack utility init method.
Args:
config_block - a dictionary creates from the CLI driver. See that
script for the things that are required and
optional.
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
"""
if config_block:
self._config = config_block
else:
logging.error('config block was garbage')
raise SystemError
def upsert(self):
    """
    The main event of the utility. Create or update a Cloud Formation
    stack. Injecting properties where needed

    Args:
        None

    Returns:
        True if the stack create/update is started successfully else
        False if the start goes off in the weeds.

    Exits:
        If the user asked for a dryrun exit(with a code 0) the thing here. There is no
        point continuing after that point.
    """
    required_parameters = []
    self._stackParameters = []

    try:
        self._initialize_upsert()
    except Exception:
        return False

    try:
        available_parameters = self._parameters.keys()

        # the template declares what it needs; match each declared
        # parameter against what was supplied/derived
        for parameter_name in self._template.get('Parameters', {}):
            required_parameters.append(str(parameter_name))

        logging.info(' required parameters: ' + str(required_parameters))
        logging.info('available parameters: ' + str(available_parameters))

        parameters = []
        for required_parameter in required_parameters:
            parameter = {}
            parameter['ParameterKey'] = str(required_parameter)
            required_parameter = str(required_parameter)
            if required_parameter in self._parameters:
                parameter['ParameterValue'] = self._parameters[required_parameter]
            else:
                # fall back to a lower-cased name; a still-missing parameter
                # raises KeyError, which the except below turns into False
                parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
            parameters.append(parameter)

        # scanners/validator gate the deployment
        if not self._analyze_stuff():
            sys.exit(1)

        if self._config.get('dryrun', False):
            logging.info('Generating change set')
            set_id = self._generate_change_set(parameters)
            if set_id:
                self._describe_change_set(set_id)
            logging.info('This was a dryrun')
            sys.exit(0)

        self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
        self._tags.append({"Key": "ANSWER", "Value": str(42)})

        # _updateStack was decided earlier by _set_update()
        if self._updateStack:
            stack = self._cloudFormation.update_stack(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ClientRequestToken=str(uuid.uuid4())
            )
            logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
        else:
            stack = self._cloudFormation.create_stack(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ClientRequestToken=str(uuid.uuid4())
            )
            logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
    except Exception as x:
        if self._verbose:
            logging.error(x, exc_info=True)
        else:
            logging.error(x, exc_info=False)
        return False

    return True
def _describe_change_set(self, set_id):
    """
    Poll the given change set until it reaches a terminal state, print a
    report of its changes, then delete the change set.

    Args:
        set_id - the change set Id/ARN returned by create_change_set

    Returns:
        True on success, False when describing or deleting raised.
    """
    complete_states = ['CREATE_COMPLETE', 'FAILED', 'UNKNOWN']
    try:
        logging.info('polling change set, POLL_INTERVAL={}'.format(POLL_INTERVAL))
        response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
        status = response.get('Status', 'UNKNOWN')
        while status not in complete_states:
            logging.info('current set status: {}'.format(status))
            time.sleep(POLL_INTERVAL)
            response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
            status = response.get('Status', 'UNKNOWN')
        logging.info('current set status: {}'.format(status))
        print('\n')
        print('Change set report:')
        for change in response.get('Changes', []):
            print(
                json.dumps(
                    change,
                    indent=2,
                    default=json_util.default
                )
            )
        print('\n')
        logging.info('cleaning up change set')
        self._cloudFormation.delete_change_set(ChangeSetName=set_id)
        return True
    except Exception as ruh_roh_shaggy:
        # _verbose controls whether the stack trace lands in the log
        if self._verbose:
            logging.error(ruh_roh_shaggy, exc_info=True)
        else:
            logging.error(ruh_roh_shaggy, exc_info=False)
        return False
def _generate_change_set(self, parameters):
    """
    Create a CloudFormation change set for the configured stack.

    Args:
        parameters - list of {'ParameterKey', 'ParameterValue'} dicts

    Returns:
        the change set Id on success, or None when creation failed.
    """
    try:
        self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
        self._tags.append({"Key": "ANSWER", "Value": str(42)})
        set_name = 'chg{}'.format(int(time.time()))
        # the two original branches differed only in ChangeSetType
        set_type = 'UPDATE' if self._updateStack else 'CREATE'
        changes = self._cloudFormation.create_change_set(
            StackName=self._config.get('environment', {}).get('stack_name', None),
            TemplateURL=self._templateUrl,
            Parameters=parameters,
            Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
            Tags=self._tags,
            ChangeSetName=set_name,
            ChangeSetType=set_type
        )
        if self._verbose:
            logging.info('Change set: {}'.format(
                json.dumps(changes, indent=2, default=json_util.default)
            ))
        return changes.get('Id', None)
    except Exception as ruh_roh_shaggy:
        if self._verbose:
            logging.error(ruh_roh_shaggy, exc_info=True)
        else:
            logging.error(ruh_roh_shaggy, exc_info=False)
        return None
def _render_template(self):
    """
    When 'meta-parameters' are configured, render the template through
    Jinja2 with those values and point the config at the rendered file.

    Returns:
        True when there is nothing to render; otherwise the rendered
        template text (callers only test the result for truthiness).
        Exits the process on any rendering error.
    """
    buf = None
    try:
        context = self._config.get('meta-parameters', None)
        if not context:
            return True
        template_file = self._config.get('environment', {}).get('template', None)
        path, filename = os.path.split(template_file)
        env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(path or './')
        )
        buf = env.get_template(filename).render(context)
        # delete=False: the rendered file must outlive this scope, since the
        # rest of the run reads the template from this path
        with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr', delete=False) as tmp:
            tmp.write(buf)
            logging.info('template rendered into {}'.format(tmp.name))
            self._config['environment']['template'] = tmp.name
    except Exception as wtf:
        print('error: _render_template() caught {}'.format(wtf))
        sys.exit(1)
    return buf
def _load_template(self):
template_decoded = False
template_file = self._config.get('environment', {}).get('template', None)
self._template = None
try:
json_stuff = open(template_file)
self._template = json.load(json_stuff)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = False
logging.info('template is JSON')
else:
logging.info('template is not a valid JSON template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(json): {}'.format(x))
logging.info('template is not JSON')
if not template_decoded:
try:
yaml.add_multi_constructor('', default_ctor, Loader=Loader)
with open(template_file, 'r') as f:
self._template = yaml.load(f, Loader=Loader)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = True
logging.info('template is YAML')
else:
logging.info('template is not a valid YAML template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(yaml): {}'.format(x))
logging.info('template is not YAML')
return template_decoded
def list(self):
"""
List the existing stacks in the indicated region
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if 'StackSummaries' in response:
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if stack_status != 'DELETE_COMPLETE':
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token)
else:
interested = False
return True
def smash(self):
    """
    Delete the configured stack.

    Returns:
        True when the delete completes, False when the stack could not be
        found or its initial status could not be read.
    """
    self._initialize_smash()
    try:
        stack_name = self._config.get('environment', {}).get('stack_name', None)
        preflight = self._cloudFormation.describe_stacks(StackName=stack_name)
        logging.debug('smash pre-flight returned: {}'.format(
            json.dumps(preflight,
                       indent=4,
                       default=json_util.default
                       )))
    except ClientError:
        logging.warning('your stack is in another castle [0].')
        return False
    except Exception as oops:
        logging.error('failed to find intial status of smash candidate: {}'.format(oops))
        return False
    reply = self._cloudFormation.delete_stack(StackName=stack_name)
    logging.info('delete started for stack: {}'.format(stack_name))
    logging.debug('delete_stack returned: {}'.format(json.dumps(reply, indent=4)))
    return self.poll_stack()
def _init_boto3_clients(self):
    """
    The utililty requires boto3 clients to Cloud Formation and S3. Here is
    where we make them.

    Args:
        None

    Returns:
        Good or Bad; True or False
    """
    try:
        profile = self._config.get('environment', {}).get('profile')
        region = self._config.get('environment', {}).get('region')
        # honor an explicit credentials profile when one is configured
        if profile:
            self._b3Sess = boto3.session.Session(profile_name=profile)
        else:
            self._b3Sess = boto3.session.Session()
        self._s3 = self._b3Sess.client('s3')
        self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
        self._ssm = self._b3Sess.client('ssm', region_name=region)
        return True
    except Exception as wtf:
        logging.error('Exception caught in intialize_session(): {}'.format(wtf))
        traceback.print_exc(file=sys.stdout)
        return False
def _fill_defaults(self):
try:
parms = self._template['Parameters']
for key in parms:
key = str(key)
if 'Default' in parms[key] and key not in self._parameters:
self._parameters[key] = str(parms[key]['Default'])
except Exception as wtf:
logging.error('Exception caught in fill_defaults(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
return True
def _get_ssm_parameter(self, p):
"""
Get parameters from Simple Systems Manager
Args:
p - a parameter name
Returns:
a value, decrypted if needed, if successful or None if things go
sideways.
"""
try:
response = self._ssm.get_parameter(Name=p, WithDecryption=True)
return response.get('Parameter', {}).get('Value', None)
except Exception as ruh_roh:
logging.error(ruh_roh, exc_info=False)
return None
def _fill_parameters(self):
    """
    Fill in the _parameters dict from the properties file.

    Args:
        None

    Returns:
        True

    Todo:
        Figure out what could go wrong and at least acknowledge the the
        fact that Murphy was an optimist.
    """
    self._parameters = self._config.get('parameters', {})
    self._fill_defaults()

    for k in self._parameters.keys():
        try:
            # '[ssm:name]' -> resolve from SSM Parameter Store
            if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
                parts = self._parameters[k].split(':')
                tmp = parts[1].replace(']', '')
                val = self._get_ssm_parameter(tmp)
                if val:
                    self._parameters[k] = val
                else:
                    logging.error('SSM parameter {} not found'.format(tmp))
                    return False
            elif self._parameters[k] == self.ASK:
                # '[ask]' -> prompt the operator twice and require a match
                val = None
                a1 = '__x___'
                a2 = '__y___'
                prompt1 = "Enter value for '{}': ".format(k)
                prompt2 = "Confirm value for '{}': ".format(k)
                while a1 != a2:
                    a1 = getpass.getpass(prompt=prompt1)
                    a2 = getpass.getpass(prompt=prompt2)
                    if a1 == a2:
                        val = a1
                    else:
                        print('values do not match, try again')
                self._parameters[k] = val
        except:
            # NOTE(review): bare except appears intended to skip non-string
            # parameter values (startswith would raise AttributeError), but
            # it also hides genuine errors - confirm before narrowing.
            pass
    return True
def _set_update(self):
    """
    Determine if we are creating a new stack or updating and existing one.
    The update member is set as you would expect at the end of this query.

    Args:
        None

    Returns:
        True
    """
    try:
        self._updateStack = False
        stack_name = self._config.get('environment', {}).get('stack_name', None)
        response = self._cloudFormation.describe_stacks(StackName=stack_name)
        stack = response['Stacks'][0]
        if stack['StackStatus'] == 'ROLLBACK_COMPLETE':
            # a stack stuck in ROLLBACK_COMPLETE cannot be updated; it has
            # to be deleted so the run can re-create it from scratch
            logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')
            del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)
            logging.info('delete started for stack: {}'.format(stack_name))
            logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))
            stack_delete = self.poll_stack()
            if not stack_delete:
                return False
        if stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']:
            self._updateStack = True
    except:
        # describe_stacks raises when the stack does not exist -> create
        self._updateStack = False
    logging.info('update_stack: ' + str(self._updateStack))
    return True
def _archive_elements(self):
    """
    Cloud Formation likes to take the template from S3 so here we put the
    template into S3. We also store the parameters file that was used in
    this run. Note: you can pass anything as the version string but you
    should at least consider a version control tag or git commit hash as
    the version.

    Args:
        None

    Returns:
        True if the stuff lands in S3 or False if the file doesn't
        really exist or the upload goes sideways.
    """
    try:
        stackfile_key, propertyfile_key = self._craft_s3_keys()
        template_file = self._config.get('environment', {}).get('template', None)
        bucket = self._config.get('environment', {}).get('bucket', None)
        if not os.path.isfile(template_file):
            logging.info("{} is not actually a file".format(template_file))
            return False
        # dump the resolved parameters to a scratch file and upload it
        logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key))
        temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
        with open(temp_file_name, 'w') as dump_file:
            json.dump(self._parameters, dump_file, indent=4)
        self._s3.upload_file(temp_file_name, bucket, propertyfile_key)
        logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key))
        self._s3.upload_file(template_file, bucket, stackfile_key)
        # CloudFormation create/update later reads the template from here
        self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key)
        logging.info("template_url: " + self._templateUrl)
        return True
    except Exception as x:
        logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))
        traceback.print_exc(file=sys.stdout)
        return False
def _craft_s3_keys(self):
"""
We are putting stuff into S3, were supplied the bucket. Here we
craft the key of the elements we are putting up there in the
internet clouds.
Args:
None
Returns:
a tuple of teplate file key and property file key
"""
now = time.gmtime()
stub = "templates/{stack_name}/{version}".format(
stack_name=self._config.get('environment', {}).get('stack_name', None),
version=self._config.get('codeVersion')
)
stub = stub + "/" + str(now.tm_year)
stub = stub + "/" + str('%02d' % now.tm_mon)
stub = stub + "/" + str('%02d' % now.tm_mday)
stub = stub + "/" + str('%02d' % now.tm_hour)
stub = stub + ":" + str('%02d' % now.tm_min)
stub = stub + ":" + str('%02d' % now.tm_sec)
if self._yaml:
template_key = stub + "/stack.yaml"
else:
template_key = stub + "/stack.json"
property_key = stub + "/stack.properties"
return template_key, property_key
def poll_stack(self):
    """
    Spin in a loop while the Cloud Formation process either fails or succeeds

    Args:
        None

    Returns:
        Good or bad; True or False
    """
    logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
    time.sleep(POLL_INTERVAL)
    completed_states = [
        'CREATE_COMPLETE',
        'UPDATE_COMPLETE',
        'DELETE_COMPLETE'
    ]
    stack_name = self._config.get('environment', {}).get('stack_name', None)
    while True:
        try:
            response = self._cloudFormation.describe_stacks(StackName=stack_name)
            stack = response['Stacks'][0]
            current_status = stack['StackStatus']
            logging.info('current status of {}: {}'.format(stack_name, current_status))
            # terminal states end in COMPLETE or FAILED; success is
            # membership in completed_states, anything else is failure
            if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
                if current_status in completed_states:
                    return True
                else:
                    return False
            time.sleep(POLL_INTERVAL)
        except ClientError as wtf:
            # 'does not exist' after a delete means the stack is gone,
            # which counts as success
            if str(wtf).find('does not exist') == -1:
                logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
                traceback.print_exc(file=sys.stdout)
                return False
            else:
                logging.info('{} is gone'.format(stack_name))
                return True
        except Exception as wtf:
            logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
            traceback.print_exc(file=sys.stdout)
            return False
def _initialize_list(self):
    """Prepare the boto3 clients needed by list(); SystemError on failure."""
    ok = self._init_boto3_clients()
    if ok:
        return
    logging.error('session initialization was not good')
    raise SystemError
def _initialize_smash(self):
    """Prepare the boto3 clients needed by smash(); SystemError on failure."""
    ok = self._init_boto3_clients()
    if ok:
        return
    logging.error('session initialization was not good')
    raise SystemError
def _validate_ini_data(self):
if 'stack_name' not in self._config.get('environment', {}):
return False
elif 'bucket' not in self._config.get('environment', {}):
return False
elif 'template' not in self._config.get('environment', {}):
return False
else:
template_file = self._config.get('environment', {}).get('template', None)
if os.path.isfile(template_file):
return True
else:
logging.error('template file \'{}\' does not exist, I give up!'.format(template_file))
return False
def _initialize_upsert(self):
    """
    Run every pre-flight step needed before a create/update: validate the
    INI data, render and parse the template, create boto3 clients, resolve
    parameters and tags, archive everything to S3 and work out whether
    this run is a create or an update.

    Raises:
        SystemError - when any step fails
    """
    if not self._validate_ini_data():
        logging.error('INI file missing required bits; bucket and/or template and/or stack_name')
        raise SystemError
    elif not self._render_template():
        logging.error('template rendering failed')
        raise SystemError
    elif not self._load_template():
        logging.error('template initialization was not good')
        raise SystemError
    elif not self._init_boto3_clients():
        logging.error('session initialization was not good')
        raise SystemError
    elif not self._fill_parameters():
        logging.error('parameter setup was not good')
        raise SystemError
    elif not self._read_tags():
        logging.error('tags initialization was not good')
        raise SystemError
    elif not self._archive_elements():
        logging.error('saving stuff to S3 did not go well')
        raise SystemError
    elif not self._set_update():
        logging.error('there was a problem determining update or create')
        raise SystemError
def _analyze_stuff(self):
template_scanner = self._config.get('analysis', {}).get('template', None)
tags_scanner = self._config.get('analysis', {}).get('tags', None)
if template_scanner or tags_scanner:
r = self._externally_analyze_stuff(template_scanner, tags_scanner)
if not r:
return False
wrk = self._config.get('analysis', {}).get('enforced', 'crap').lower()
rule_exceptions = self._config.get('analysis', {}).get('exceptions', None)
if wrk == 'true' or wrk == 'false':
enforced = wrk == 'true'
self._internally_analyze_stuff(enforced, rule_exceptions)
return True
def _externally_analyze_stuff(self, template_scanner, tags_scanner):
scans_executed = False
tags_scan_status = 0
template_scan_status = 0
the_data = None
try:
if template_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
template_scan_status = answer.get('exit_status', -2)
print('\nTemplate scan:')
print(json.dumps(answer, indent=2))
if tags_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
tags_scan_status = answer.get('exit_status', -2)
print('\nTag scan:')
print(json.dumps(answer, indent=2))
if not scans_executed:
return True
elif template_scan_status == 0 and tags_scan_status == 0:
print('All scans successful')
return True
else:
print('Failed scans')
return False
except Exception as wtf:
print('')
logging.info('template_scanner: {}'.format(template_scanner))
logging.info(' tags_scanner: {}'.format(tags_scanner))
print('')
logging.error('Exception caught in analyze_stuff(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _internally_analyze_stuff(self, enforced, rule_exceptions):
    """
    Run the local CloudFormation validator over the configured template.

    Args:
        enforced - when True, any validation error (or any exception)
                   terminates the process via sys.exit(1)
        rule_exceptions - accepted for interface symmetry; not used here

    Returns:
        True (when it returns at all; enforced failures exit the process)
    """
    try:
        config_dict = {}
        config_dict['template_file'] = self._config['environment']['template']
        validator = ValidateUtility(config_dict)
        _results = validator.validate()
        results = json.loads(_results)
        for result in results:
            try:
                error_count = int(result.get('failure_count', 0))
            except Exception as strangeness:
                # validator output for this result could not be interpreted
                logging.warn('internally_analyze_stuff() strangeness: {}'.format(strangeness))
                error_count = -1
                # NOTE(review): this exit appears to belong to the except
                # branch (bail when enforced and the count is unreadable);
                # original indentation is ambiguous in this dump - confirm.
                if enforced:
                    traceback.print_exc(file=sys.stdout)
                    sys.exit(1)
            if error_count == 0:
                logging.info('CloudFormation Validator found zero errors')
            elif error_count == 1:
                if enforced:
                    logging.error('CloudFormation Validator found one error')
                    sys.exit(1)
                else:
                    logging.warn('CloudFormation Validator found one error')
            elif error_count > 1:
                if enforced:
                    logging.error(
                        'CloudFormation Validator found {} errors'.format(error_count)
                    )
                    sys.exit(1)
                else:
                    logging.warn(
                        'CloudFormation Validator found {} errors'.format(error_count)
                    )
    except Exception as ruh_roh_shaggy:
        logging.error('internally_analyze_stuff() exploded: {}'.format(ruh_roh_shaggy))
        traceback.print_exc(file=sys.stdout)
        if enforced:
            sys.exit(1)
    return True
def get_cloud_formation_client(self):
    """Expose the underlying boto3 CloudFormation client."""
    return self._cloudFormation
|
muckamuck/stackility | stackility/CloudStackUtility.py | CloudStackUtility._set_update | python | def _set_update(self):
try:
self._updateStack = False
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
if stack['StackStatus'] == 'ROLLBACK_COMPLETE':
logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')
del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))
stack_delete = self.poll_stack()
if not stack_delete:
return False
if stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']:
self._updateStack = True
except:
self._updateStack = False
logging.info('update_stack: ' + str(self._updateStack))
return True | Determine if we are creating a new stack or updating and existing one.
The update member is set as you would expect at the end of this query.
Args:
None
Returns:
True | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L530-L561 | null | class CloudStackUtility:
"""
Cloud stack utility is yet another tool create AWS Cloudformation stacks.
"""
ASK = '[ask]'
SSM = '[ssm:'
_verbose = False
_template = None
_b3Sess = None
_cloudFormation = None
_config = None
_parameters = {}
_stackParameters = []
_s3 = None
_ssm = None
_tags = []
_templateUrl = None
_updateStack = False
_yaml = False
def __init__(self, config_block):
"""
Cloud stack utility init method.
Args:
config_block - a dictionary creates from the CLI driver. See that
script for the things that are required and
optional.
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
"""
if config_block:
self._config = config_block
else:
logging.error('config block was garbage')
raise SystemError
def upsert(self):
"""
The main event of the utility. Create or update a Cloud Formation
stack. Injecting properties where needed
Args:
None
Returns:
True if the stack create/update is started successfully else
False if the start goes off in the weeds.
Exits:
If the user asked for a dryrun exit(with a code 0) the thing here. There is no
point continuing after that point.
"""
required_parameters = []
self._stackParameters = []
try:
self._initialize_upsert()
except Exception:
return False
try:
available_parameters = self._parameters.keys()
for parameter_name in self._template.get('Parameters', {}):
required_parameters.append(str(parameter_name))
logging.info(' required parameters: ' + str(required_parameters))
logging.info('available parameters: ' + str(available_parameters))
parameters = []
for required_parameter in required_parameters:
parameter = {}
parameter['ParameterKey'] = str(required_parameter)
required_parameter = str(required_parameter)
if required_parameter in self._parameters:
parameter['ParameterValue'] = self._parameters[required_parameter]
else:
parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
parameters.append(parameter)
if not self._analyze_stuff():
sys.exit(1)
if self._config.get('dryrun', False):
logging.info('Generating change set')
set_id = self._generate_change_set(parameters)
if set_id:
self._describe_change_set(set_id)
logging.info('This was a dryrun')
sys.exit(0)
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
if self._updateStack:
stack = self._cloudFormation.update_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
else:
stack = self._cloudFormation.create_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
except Exception as x:
if self._verbose:
logging.error(x, exc_info=True)
else:
logging.error(x, exc_info=False)
return False
return True
def _describe_change_set(self, set_id):
complete_states = ['CREATE_COMPLETE', 'FAILED', 'UNKNOWN']
try:
logging.info('polling change set, POLL_INTERVAL={}'.format(POLL_INTERVAL))
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
while status not in complete_states:
logging.info('current set status: {}'.format(status))
time.sleep(POLL_INTERVAL)
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
logging.info('current set status: {}'.format(status))
print('\n')
print('Change set report:')
for change in response.get('Changes', []):
print(
json.dumps(
change,
indent=2,
default=json_util.default
)
)
print('\n')
logging.info('cleaning up change set')
self._cloudFormation.delete_change_set(ChangeSetName=set_id)
return True
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return False
def _generate_change_set(self, parameters):
try:
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
set_name = 'chg{}'.format(int(time.time()))
if self._updateStack:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='UPDATE'
)
else:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='CREATE'
)
if self._verbose:
logging.info('Change set: {}'.format(
json.dumps(changes, indent=2, default=json_util.default)
))
return changes.get('Id', None)
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return None
def _render_template(self):
buf = None
try:
context = self._config.get('meta-parameters', None)
if not context:
return True
template_file = self._config.get('environment', {}).get('template', None)
path, filename = os.path.split(template_file)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(path or './')
)
buf = env.get_template(filename).render(context)
with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr', delete=False) as tmp:
tmp.write(buf)
logging.info('template rendered into {}'.format(tmp.name))
self._config['environment']['template'] = tmp.name
except Exception as wtf:
print('error: _render_template() caught {}'.format(wtf))
sys.exit(1)
return buf
def _load_template(self):
template_decoded = False
template_file = self._config.get('environment', {}).get('template', None)
self._template = None
try:
json_stuff = open(template_file)
self._template = json.load(json_stuff)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = False
logging.info('template is JSON')
else:
logging.info('template is not a valid JSON template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(json): {}'.format(x))
logging.info('template is not JSON')
if not template_decoded:
try:
yaml.add_multi_constructor('', default_ctor, Loader=Loader)
with open(template_file, 'r') as f:
self._template = yaml.load(f, Loader=Loader)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = True
logging.info('template is YAML')
else:
logging.info('template is not a valid YAML template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(yaml): {}'.format(x))
logging.info('template is not YAML')
return template_decoded
def list(self):
"""
List the existing stacks in the indicated region
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if 'StackSummaries' in response:
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if stack_status != 'DELETE_COMPLETE':
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token)
else:
interested = False
return True
def smash(self):
"""
Smash the given stack
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_smash()
try:
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
logging.debug('smash pre-flight returned: {}'.format(
json.dumps(response,
indent=4,
default=json_util.default
)))
except ClientError as wtf:
logging.warning('your stack is in another castle [0].')
return False
except Exception as wtf:
logging.error('failed to find intial status of smash candidate: {}'.format(wtf))
return False
response = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))
return self.poll_stack()
def _init_boto3_clients(self):
"""
The utililty requires boto3 clients to Cloud Formation and S3. Here is
where we make them.
Args:
None
Returns:
Good or Bad; True or False
"""
try:
profile = self._config.get('environment', {}).get('profile')
region = self._config.get('environment', {}).get('region')
if profile:
self._b3Sess = boto3.session.Session(profile_name=profile)
else:
self._b3Sess = boto3.session.Session()
self._s3 = self._b3Sess.client('s3')
self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
self._ssm = self._b3Sess.client('ssm', region_name=region)
return True
except Exception as wtf:
logging.error('Exception caught in intialize_session(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _fill_defaults(self):
try:
parms = self._template['Parameters']
for key in parms:
key = str(key)
if 'Default' in parms[key] and key not in self._parameters:
self._parameters[key] = str(parms[key]['Default'])
except Exception as wtf:
logging.error('Exception caught in fill_defaults(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
return True
def _get_ssm_parameter(self, p):
"""
Get parameters from Simple Systems Manager
Args:
p - a parameter name
Returns:
a value, decrypted if needed, if successful or None if things go
sideways.
"""
try:
response = self._ssm.get_parameter(Name=p, WithDecryption=True)
return response.get('Parameter', {}).get('Value', None)
except Exception as ruh_roh:
logging.error(ruh_roh, exc_info=False)
return None
def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True
def _read_tags(self):
"""
Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
Figure what could go wrong and at least acknowledge the
the fact that Murphy was an optimist.
"""
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True
def _archive_elements(self):
"""
Cloud Formation likes to take the template from S3 so here we put the
template into S3. We also store the parameters file that was used in
this run. Note: you can pass anything as the version string but you
should at least consider a version control tag or git commit hash as
the version.
Args:
None
Returns:
True if the stuff lands in S3 or False if the file doesn't
really exist or the upload goes sideways.
"""
try:
stackfile_key, propertyfile_key = self._craft_s3_keys()
template_file = self._config.get('environment', {}).get('template', None)
bucket = self._config.get('environment', {}).get('bucket', None)
if not os.path.isfile(template_file):
logging.info("{} is not actually a file".format(template_file))
return False
logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key))
temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
with open(temp_file_name, 'w') as dump_file:
json.dump(self._parameters, dump_file, indent=4)
self._s3.upload_file(temp_file_name, bucket, propertyfile_key)
logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key))
self._s3.upload_file(template_file, bucket, stackfile_key)
self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key)
logging.info("template_url: " + self._templateUrl)
return True
except Exception as x:
logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))
traceback.print_exc(file=sys.stdout)
return False
def _craft_s3_keys(self):
"""
We are putting stuff into S3, were supplied the bucket. Here we
craft the key of the elements we are putting up there in the
internet clouds.
Args:
None
Returns:
a tuple of teplate file key and property file key
"""
now = time.gmtime()
stub = "templates/{stack_name}/{version}".format(
stack_name=self._config.get('environment', {}).get('stack_name', None),
version=self._config.get('codeVersion')
)
stub = stub + "/" + str(now.tm_year)
stub = stub + "/" + str('%02d' % now.tm_mon)
stub = stub + "/" + str('%02d' % now.tm_mday)
stub = stub + "/" + str('%02d' % now.tm_hour)
stub = stub + ":" + str('%02d' % now.tm_min)
stub = stub + ":" + str('%02d' % now.tm_sec)
if self._yaml:
template_key = stub + "/stack.yaml"
else:
template_key = stub + "/stack.json"
property_key = stub + "/stack.properties"
return template_key, property_key
def poll_stack(self):
"""
Spin in a loop while the Cloud Formation process either fails or succeeds
Args:
None
Returns:
Good or bad; True or False
"""
logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
time.sleep(POLL_INTERVAL)
completed_states = [
'CREATE_COMPLETE',
'UPDATE_COMPLETE',
'DELETE_COMPLETE'
]
stack_name = self._config.get('environment', {}).get('stack_name', None)
while True:
try:
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
current_status = stack['StackStatus']
logging.info('current status of {}: {}'.format(stack_name, current_status))
if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
if current_status in completed_states:
return True
else:
return False
time.sleep(POLL_INTERVAL)
except ClientError as wtf:
if str(wtf).find('does not exist') == -1:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
else:
logging.info('{} is gone'.format(stack_name))
return True
except Exception as wtf:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _initialize_list(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _initialize_smash(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _validate_ini_data(self):
if 'stack_name' not in self._config.get('environment', {}):
return False
elif 'bucket' not in self._config.get('environment', {}):
return False
elif 'template' not in self._config.get('environment', {}):
return False
else:
template_file = self._config.get('environment', {}).get('template', None)
if os.path.isfile(template_file):
return True
else:
logging.error('template file \'{}\' does not exist, I give up!'.format(template_file))
return False
def _initialize_upsert(self):
if not self._validate_ini_data():
logging.error('INI file missing required bits; bucket and/or template and/or stack_name')
raise SystemError
elif not self._render_template():
logging.error('template rendering failed')
raise SystemError
elif not self._load_template():
logging.error('template initialization was not good')
raise SystemError
elif not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
elif not self._fill_parameters():
logging.error('parameter setup was not good')
raise SystemError
elif not self._read_tags():
logging.error('tags initialization was not good')
raise SystemError
elif not self._archive_elements():
logging.error('saving stuff to S3 did not go well')
raise SystemError
elif not self._set_update():
logging.error('there was a problem determining update or create')
raise SystemError
def _analyze_stuff(self):
template_scanner = self._config.get('analysis', {}).get('template', None)
tags_scanner = self._config.get('analysis', {}).get('tags', None)
if template_scanner or tags_scanner:
r = self._externally_analyze_stuff(template_scanner, tags_scanner)
if not r:
return False
wrk = self._config.get('analysis', {}).get('enforced', 'crap').lower()
rule_exceptions = self._config.get('analysis', {}).get('exceptions', None)
if wrk == 'true' or wrk == 'false':
enforced = wrk == 'true'
self._internally_analyze_stuff(enforced, rule_exceptions)
return True
def _externally_analyze_stuff(self, template_scanner, tags_scanner):
scans_executed = False
tags_scan_status = 0
template_scan_status = 0
the_data = None
try:
if template_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
template_scan_status = answer.get('exit_status', -2)
print('\nTemplate scan:')
print(json.dumps(answer, indent=2))
if tags_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
tags_scan_status = answer.get('exit_status', -2)
print('\nTag scan:')
print(json.dumps(answer, indent=2))
if not scans_executed:
return True
elif template_scan_status == 0 and tags_scan_status == 0:
print('All scans successful')
return True
else:
print('Failed scans')
return False
except Exception as wtf:
print('')
logging.info('template_scanner: {}'.format(template_scanner))
logging.info(' tags_scanner: {}'.format(tags_scanner))
print('')
logging.error('Exception caught in analyze_stuff(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _internally_analyze_stuff(self, enforced, rule_exceptions):
try:
config_dict = {}
config_dict['template_file'] = self._config['environment']['template']
validator = ValidateUtility(config_dict)
_results = validator.validate()
results = json.loads(_results)
for result in results:
try:
error_count = int(result.get('failure_count', 0))
except Exception as strangeness:
logging.warn('internally_analyze_stuff() strangeness: {}'.format(strangeness))
error_count = -1
if enforced:
traceback.print_exc(file=sys.stdout)
sys.exit(1)
if error_count == 0:
logging.info('CloudFormation Validator found zero errors')
elif error_count == 1:
if enforced:
logging.error('CloudFormation Validator found one error')
sys.exit(1)
else:
logging.warn('CloudFormation Validator found one error')
elif error_count > 1:
if enforced:
logging.error(
'CloudFormation Validator found {} errors'.format(error_count)
)
sys.exit(1)
else:
logging.warn(
'CloudFormation Validator found {} errors'.format(error_count)
)
except Exception as ruh_roh_shaggy:
logging.error('internally_analyze_stuff() exploded: {}'.format(ruh_roh_shaggy))
traceback.print_exc(file=sys.stdout)
if enforced:
sys.exit(1)
return True
def get_cloud_formation_client(self):
return self._cloudFormation
|
muckamuck/stackility | stackility/CloudStackUtility.py | CloudStackUtility._archive_elements | python | def _archive_elements(self):
try:
stackfile_key, propertyfile_key = self._craft_s3_keys()
template_file = self._config.get('environment', {}).get('template', None)
bucket = self._config.get('environment', {}).get('bucket', None)
if not os.path.isfile(template_file):
logging.info("{} is not actually a file".format(template_file))
return False
logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key))
temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
with open(temp_file_name, 'w') as dump_file:
json.dump(self._parameters, dump_file, indent=4)
self._s3.upload_file(temp_file_name, bucket, propertyfile_key)
logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key))
self._s3.upload_file(template_file, bucket, stackfile_key)
self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key)
logging.info("template_url: " + self._templateUrl)
return True
except Exception as x:
logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))
traceback.print_exc(file=sys.stdout)
return False | Cloud Formation likes to take the template from S3 so here we put the
template into S3. We also store the parameters file that was used in
this run. Note: you can pass anything as the version string but you
should at least consider a version control tag or git commit hash as
the version.
Args:
None
Returns:
True if the stuff lands in S3 or False if the file doesn't
really exist or the upload goes sideways. | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L563-L603 | null | class CloudStackUtility:
"""
Cloud stack utility is yet another tool create AWS Cloudformation stacks.
"""
ASK = '[ask]'
SSM = '[ssm:'
_verbose = False
_template = None
_b3Sess = None
_cloudFormation = None
_config = None
_parameters = {}
_stackParameters = []
_s3 = None
_ssm = None
_tags = []
_templateUrl = None
_updateStack = False
_yaml = False
def __init__(self, config_block):
"""
Cloud stack utility init method.
Args:
config_block - a dictionary creates from the CLI driver. See that
script for the things that are required and
optional.
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
"""
if config_block:
self._config = config_block
else:
logging.error('config block was garbage')
raise SystemError
def upsert(self):
    """
    The main event of the utility. Create or update a Cloud Formation
    stack, injecting properties where needed.

    Args:
        None

    Returns:
        True if the stack create/update is started successfully, else
        False if the start goes off in the weeds.

    Exits:
        exit(0) after reporting the change set when the user asked for a
        dryrun; exit(1) when template/tag analysis fails. There is no
        point continuing past either.
    """
    required_parameters = []
    self._stackParameters = []
    try:
        self._initialize_upsert()
    except Exception:
        # _initialize_upsert() logs its own failure reason before raising
        return False
    try:
        available_parameters = self._parameters.keys()
        for parameter_name in self._template.get('Parameters', {}):
            required_parameters.append(str(parameter_name))
        logging.info(' required parameters: ' + str(required_parameters))
        logging.info('available parameters: ' + str(available_parameters))
        parameters = []
        for required_parameter in required_parameters:
            parameter = {}
            parameter['ParameterKey'] = str(required_parameter)
            required_parameter = str(required_parameter)
            if required_parameter in self._parameters:
                parameter['ParameterValue'] = self._parameters[required_parameter]
            else:
                # fall back to a lower-cased key; a completely missing
                # parameter raises KeyError, caught by the handler below
                parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
            parameters.append(parameter)
        if not self._analyze_stuff():
            sys.exit(1)
        if self._config.get('dryrun', False):
            # dryrun: produce and describe a change set, then stop
            logging.info('Generating change set')
            set_id = self._generate_change_set(parameters)
            if set_id:
                self._describe_change_set(set_id)
            logging.info('This was a dryrun')
            sys.exit(0)
        self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
        self._tags.append({"Key": "ANSWER", "Value": str(42)})
        if self._updateStack:
            stack = self._cloudFormation.update_stack(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ClientRequestToken=str(uuid.uuid4())
            )
            logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
        else:
            stack = self._cloudFormation.create_stack(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ClientRequestToken=str(uuid.uuid4())
            )
            logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
    except Exception as x:
        if self._verbose:
            logging.error(x, exc_info=True)
        else:
            logging.error(x, exc_info=False)
        return False
    return True
def _describe_change_set(self, set_id):
    """
    Poll the given change set until it reaches a terminal state, print a
    report of its changes, then delete the change set.

    Args:
        set_id - the change set Id/ARN returned by create_change_set

    Returns:
        True when the report was produced and the set cleaned up,
        False on any exception.
    """
    complete_states = ['CREATE_COMPLETE', 'FAILED', 'UNKNOWN']
    try:
        logging.info('polling change set, POLL_INTERVAL={}'.format(POLL_INTERVAL))
        response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
        status = response.get('Status', 'UNKNOWN')
        while status not in complete_states:
            logging.info('current set status: {}'.format(status))
            time.sleep(POLL_INTERVAL)
            response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
            status = response.get('Status', 'UNKNOWN')
        logging.info('current set status: {}'.format(status))
        print('\n')
        print('Change set report:')
        for change in response.get('Changes', []):
            # default=json_util.default serializes the non-JSON-native
            # values (datetimes) in the boto response
            print(
                json.dumps(
                    change,
                    indent=2,
                    default=json_util.default
                )
            )
        print('\n')
        logging.info('cleaning up change set')
        self._cloudFormation.delete_change_set(ChangeSetName=set_id)
        return True
    except Exception as ruh_roh_shaggy:
        if self._verbose:
            logging.error(ruh_roh_shaggy, exc_info=True)
        else:
            logging.error(ruh_roh_shaggy, exc_info=False)
        return False
def _generate_change_set(self, parameters):
    """
    Create a CloudFormation change set for the target stack (used for
    the dryrun path of upsert()).

    Args:
        parameters - list of {'ParameterKey':..., 'ParameterValue':...}
            dicts, as built by upsert()

    Returns:
        the new change set Id, or None on failure.
    """
    try:
        self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
        self._tags.append({"Key": "ANSWER", "Value": str(42)})
        # change set names must be unique per stack; derive one from the clock
        set_name = 'chg{}'.format(int(time.time()))
        if self._updateStack:
            changes = self._cloudFormation.create_change_set(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ChangeSetName=set_name,
                ChangeSetType='UPDATE'
            )
        else:
            changes = self._cloudFormation.create_change_set(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ChangeSetName=set_name,
                ChangeSetType='CREATE'
            )
        if self._verbose:
            logging.info('Change set: {}'.format(
                json.dumps(changes, indent=2, default=json_util.default)
            ))
        return changes.get('Id', None)
    except Exception as ruh_roh_shaggy:
        if self._verbose:
            logging.error(ruh_roh_shaggy, exc_info=True)
        else:
            logging.error(ruh_roh_shaggy, exc_info=False)
        return None
def _render_template(self):
    """
    If the config has a 'meta-parameters' section, render the template
    through Jinja2 with those values and repoint the config's
    environment.template at the rendered temp file.

    Returns:
        True when there is nothing to render; otherwise the rendered
        template text (truthy on success).
        NOTE(review): a template that renders to an empty string returns
        '' (falsy), which _initialize_upsert() treats as failure --
        confirm that is intended.

    Exits:
        exit(1) on any rendering error.
    """
    buf = None
    try:
        context = self._config.get('meta-parameters', None)
        if not context:
            # no meta-parameters: the template file is used as-is
            return True
        template_file = self._config.get('environment', {}).get('template', None)
        path, filename = os.path.split(template_file)
        env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(path or './')
        )
        buf = env.get_template(filename).render(context)
        # delete=False: downstream steps read this file after the with-block
        with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr', delete=False) as tmp:
            tmp.write(buf)
        logging.info('template rendered into {}'.format(tmp.name))
        self._config['environment']['template'] = tmp.name
    except Exception as wtf:
        print('error: _render_template() caught {}'.format(wtf))
        sys.exit(1)
    return buf
def _load_template(self):
    """
    Parse the template file, trying JSON first and falling back to YAML.
    Sets self._template to the parsed dict and self._yaml to record
    which format won.

    Returns:
        True when the file parsed and contains a Resources section,
        False otherwise.
    """
    template_decoded = False
    template_file = self._config.get('environment', {}).get('template', None)
    self._template = None
    try:
        # NOTE(review): this file handle is never closed explicitly
        json_stuff = open(template_file)
        self._template = json.load(json_stuff)
        # a usable CloudFormation template must have a Resources section
        if self._template and 'Resources' in self._template:
            template_decoded = True
            self._yaml = False
            logging.info('template is JSON')
        else:
            logging.info('template is not a valid JSON template')
    except Exception as x:
        template_decoded = False
        logging.debug('Exception caught in load_template(json): {}'.format(x))
        logging.info('template is not JSON')
    if not template_decoded:
        try:
            # default_ctor (module level) is registered for every YAML tag
            # -- presumably to tolerate CloudFormation short forms like
            # !Ref / !Sub; verify against the module header
            yaml.add_multi_constructor('', default_ctor, Loader=Loader)
            with open(template_file, 'r') as f:
                self._template = yaml.load(f, Loader=Loader)
            if self._template and 'Resources' in self._template:
                template_decoded = True
                self._yaml = True
                logging.info('template is YAML')
            else:
                logging.info('template is not a valid YAML template')
        except Exception as x:
            template_decoded = False
            logging.debug('Exception caught in load_template(yaml): {}'.format(x))
            logging.info('template is not YAML')
    return template_decoded
def list(self):
"""
List the existing stacks in the indicated region
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if 'StackSummaries' in response:
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if stack_status != 'DELETE_COMPLETE':
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token)
else:
interested = False
return True
def smash(self):
    """
    Delete the configured stack and wait for the delete to finish.

    Args:
        None

    Returns:
        True when the delete completes, False when the stack cannot be
        found or the delete does not finish cleanly.
    """
    self._initialize_smash()
    try:
        stack_name = self._config.get('environment', {}).get('stack_name', None)
        # pre-flight: make sure the stack actually exists before deleting
        response = self._cloudFormation.describe_stacks(StackName=stack_name)
        logging.debug('smash pre-flight returned: {}'.format(
            json.dumps(response,
                       indent=4,
                       default=json_util.default
                       )))
    except ClientError as wtf:
        # describe_stacks raises ClientError when the stack does not exist
        logging.warning('your stack is in another castle [0].')
        return False
    except Exception as wtf:
        logging.error('failed to find intial status of smash candidate: {}'.format(wtf))
        return False
    response = self._cloudFormation.delete_stack(StackName=stack_name)
    logging.info('delete started for stack: {}'.format(stack_name))
    logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))
    # block until the delete succeeds or fails
    return self.poll_stack()
def _init_boto3_clients(self):
    """
    The utililty requires boto3 clients to Cloud Formation, S3 and SSM.
    Here is where we make them.

    Args:
        None

    Returns:
        Good or Bad; True or False
    """
    try:
        profile = self._config.get('environment', {}).get('profile')
        region = self._config.get('environment', {}).get('region')
        # a named profile is optional; fall back to the default credential chain
        if profile:
            self._b3Sess = boto3.session.Session(profile_name=profile)
        else:
            self._b3Sess = boto3.session.Session()
        self._s3 = self._b3Sess.client('s3')
        self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
        self._ssm = self._b3Sess.client('ssm', region_name=region)
        return True
    except Exception as wtf:
        logging.error('Exception caught in intialize_session(): {}'.format(wtf))
        traceback.print_exc(file=sys.stdout)
        return False
def _fill_defaults(self):
try:
parms = self._template['Parameters']
for key in parms:
key = str(key)
if 'Default' in parms[key] and key not in self._parameters:
self._parameters[key] = str(parms[key]['Default'])
except Exception as wtf:
logging.error('Exception caught in fill_defaults(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
return True
def _get_ssm_parameter(self, p):
"""
Get parameters from Simple Systems Manager
Args:
p - a parameter name
Returns:
a value, decrypted if needed, if successful or None if things go
sideways.
"""
try:
response = self._ssm.get_parameter(Name=p, WithDecryption=True)
return response.get('Parameter', {}).get('Value', None)
except Exception as ruh_roh:
logging.error(ruh_roh, exc_info=False)
return None
def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True
def _read_tags(self):
"""
Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
Figure what could go wrong and at least acknowledge the
the fact that Murphy was an optimist.
"""
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True
def _set_update(self):
"""
Determine if we are creating a new stack or updating and existing one.
The update member is set as you would expect at the end of this query.
Args:
None
Returns:
True
"""
try:
self._updateStack = False
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
if stack['StackStatus'] == 'ROLLBACK_COMPLETE':
logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')
del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))
stack_delete = self.poll_stack()
if not stack_delete:
return False
if stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']:
self._updateStack = True
except:
self._updateStack = False
logging.info('update_stack: ' + str(self._updateStack))
return True
def _craft_s3_keys(self):
"""
We are putting stuff into S3, were supplied the bucket. Here we
craft the key of the elements we are putting up there in the
internet clouds.
Args:
None
Returns:
a tuple of teplate file key and property file key
"""
now = time.gmtime()
stub = "templates/{stack_name}/{version}".format(
stack_name=self._config.get('environment', {}).get('stack_name', None),
version=self._config.get('codeVersion')
)
stub = stub + "/" + str(now.tm_year)
stub = stub + "/" + str('%02d' % now.tm_mon)
stub = stub + "/" + str('%02d' % now.tm_mday)
stub = stub + "/" + str('%02d' % now.tm_hour)
stub = stub + ":" + str('%02d' % now.tm_min)
stub = stub + ":" + str('%02d' % now.tm_sec)
if self._yaml:
template_key = stub + "/stack.yaml"
else:
template_key = stub + "/stack.json"
property_key = stub + "/stack.properties"
return template_key, property_key
def poll_stack(self):
    """
    Spin in a loop until the Cloud Formation operation either fails or
    succeeds.

    Args:
        None

    Returns:
        True when the stack lands in a successful terminal state (or is
        gone after a delete), False on a failed state or an unexpected
        exception.
    """
    logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
    time.sleep(POLL_INTERVAL)
    completed_states = [
        'CREATE_COMPLETE',
        'UPDATE_COMPLETE',
        'DELETE_COMPLETE'
    ]
    stack_name = self._config.get('environment', {}).get('stack_name', None)
    while True:
        try:
            response = self._cloudFormation.describe_stacks(StackName=stack_name)
            stack = response['Stacks'][0]
            current_status = stack['StackStatus']
            logging.info('current status of {}: {}'.format(stack_name, current_status))
            # any *_COMPLETE or *_FAILED status is terminal; only the
            # three states above count as success
            if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
                if current_status in completed_states:
                    return True
                else:
                    return False
            time.sleep(POLL_INTERVAL)
        except ClientError as wtf:
            # describe_stacks on a deleted stack raises "does not exist";
            # after a delete that is the success signal
            if str(wtf).find('does not exist') == -1:
                logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
                traceback.print_exc(file=sys.stdout)
                return False
            else:
                logging.info('{} is gone'.format(stack_name))
                return True
        except Exception as wtf:
            logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
            traceback.print_exc(file=sys.stdout)
            return False
def _initialize_list(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _initialize_smash(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _validate_ini_data(self):
if 'stack_name' not in self._config.get('environment', {}):
return False
elif 'bucket' not in self._config.get('environment', {}):
return False
elif 'template' not in self._config.get('environment', {}):
return False
else:
template_file = self._config.get('environment', {}).get('template', None)
if os.path.isfile(template_file):
return True
else:
logging.error('template file \'{}\' does not exist, I give up!'.format(template_file))
return False
def _initialize_upsert(self):
if not self._validate_ini_data():
logging.error('INI file missing required bits; bucket and/or template and/or stack_name')
raise SystemError
elif not self._render_template():
logging.error('template rendering failed')
raise SystemError
elif not self._load_template():
logging.error('template initialization was not good')
raise SystemError
elif not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
elif not self._fill_parameters():
logging.error('parameter setup was not good')
raise SystemError
elif not self._read_tags():
logging.error('tags initialization was not good')
raise SystemError
elif not self._archive_elements():
logging.error('saving stuff to S3 did not go well')
raise SystemError
elif not self._set_update():
logging.error('there was a problem determining update or create')
raise SystemError
def _analyze_stuff(self):
template_scanner = self._config.get('analysis', {}).get('template', None)
tags_scanner = self._config.get('analysis', {}).get('tags', None)
if template_scanner or tags_scanner:
r = self._externally_analyze_stuff(template_scanner, tags_scanner)
if not r:
return False
wrk = self._config.get('analysis', {}).get('enforced', 'crap').lower()
rule_exceptions = self._config.get('analysis', {}).get('exceptions', None)
if wrk == 'true' or wrk == 'false':
enforced = wrk == 'true'
self._internally_analyze_stuff(enforced, rule_exceptions)
return True
def _externally_analyze_stuff(self, template_scanner, tags_scanner):
scans_executed = False
tags_scan_status = 0
template_scan_status = 0
the_data = None
try:
if template_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
template_scan_status = answer.get('exit_status', -2)
print('\nTemplate scan:')
print(json.dumps(answer, indent=2))
if tags_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
tags_scan_status = answer.get('exit_status', -2)
print('\nTag scan:')
print(json.dumps(answer, indent=2))
if not scans_executed:
return True
elif template_scan_status == 0 and tags_scan_status == 0:
print('All scans successful')
return True
else:
print('Failed scans')
return False
except Exception as wtf:
print('')
logging.info('template_scanner: {}'.format(template_scanner))
logging.info(' tags_scanner: {}'.format(tags_scanner))
print('')
logging.error('Exception caught in analyze_stuff(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _internally_analyze_stuff(self, enforced, rule_exceptions):
try:
config_dict = {}
config_dict['template_file'] = self._config['environment']['template']
validator = ValidateUtility(config_dict)
_results = validator.validate()
results = json.loads(_results)
for result in results:
try:
error_count = int(result.get('failure_count', 0))
except Exception as strangeness:
logging.warn('internally_analyze_stuff() strangeness: {}'.format(strangeness))
error_count = -1
if enforced:
traceback.print_exc(file=sys.stdout)
sys.exit(1)
if error_count == 0:
logging.info('CloudFormation Validator found zero errors')
elif error_count == 1:
if enforced:
logging.error('CloudFormation Validator found one error')
sys.exit(1)
else:
logging.warn('CloudFormation Validator found one error')
elif error_count > 1:
if enforced:
logging.error(
'CloudFormation Validator found {} errors'.format(error_count)
)
sys.exit(1)
else:
logging.warn(
'CloudFormation Validator found {} errors'.format(error_count)
)
except Exception as ruh_roh_shaggy:
logging.error('internally_analyze_stuff() exploded: {}'.format(ruh_roh_shaggy))
traceback.print_exc(file=sys.stdout)
if enforced:
sys.exit(1)
return True
def get_cloud_formation_client(self):
return self._cloudFormation
|
muckamuck/stackility | stackility/CloudStackUtility.py | CloudStackUtility._craft_s3_keys | python | def _craft_s3_keys(self):
now = time.gmtime()
stub = "templates/{stack_name}/{version}".format(
stack_name=self._config.get('environment', {}).get('stack_name', None),
version=self._config.get('codeVersion')
)
stub = stub + "/" + str(now.tm_year)
stub = stub + "/" + str('%02d' % now.tm_mon)
stub = stub + "/" + str('%02d' % now.tm_mday)
stub = stub + "/" + str('%02d' % now.tm_hour)
stub = stub + ":" + str('%02d' % now.tm_min)
stub = stub + ":" + str('%02d' % now.tm_sec)
if self._yaml:
template_key = stub + "/stack.yaml"
else:
template_key = stub + "/stack.json"
property_key = stub + "/stack.properties"
return template_key, property_key | We are putting stuff into S3, were supplied the bucket. Here we
craft the key of the elements we are putting up there in the
internet clouds.
Args:
None
Returns:
a tuple of teplate file key and property file key | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L605-L636 | null | class CloudStackUtility:
"""
Cloud stack utility is yet another tool create AWS Cloudformation stacks.
"""
ASK = '[ask]'
SSM = '[ssm:'
_verbose = False
_template = None
_b3Sess = None
_cloudFormation = None
_config = None
_parameters = {}
_stackParameters = []
_s3 = None
_ssm = None
_tags = []
_templateUrl = None
_updateStack = False
_yaml = False
def __init__(self, config_block):
"""
Cloud stack utility init method.
Args:
config_block - a dictionary creates from the CLI driver. See that
script for the things that are required and
optional.
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
"""
if config_block:
self._config = config_block
else:
logging.error('config block was garbage')
raise SystemError
def upsert(self):
"""
The main event of the utility. Create or update a Cloud Formation
stack. Injecting properties where needed
Args:
None
Returns:
True if the stack create/update is started successfully else
False if the start goes off in the weeds.
Exits:
If the user asked for a dryrun exit(with a code 0) the thing here. There is no
point continuing after that point.
"""
required_parameters = []
self._stackParameters = []
try:
self._initialize_upsert()
except Exception:
return False
try:
available_parameters = self._parameters.keys()
for parameter_name in self._template.get('Parameters', {}):
required_parameters.append(str(parameter_name))
logging.info(' required parameters: ' + str(required_parameters))
logging.info('available parameters: ' + str(available_parameters))
parameters = []
for required_parameter in required_parameters:
parameter = {}
parameter['ParameterKey'] = str(required_parameter)
required_parameter = str(required_parameter)
if required_parameter in self._parameters:
parameter['ParameterValue'] = self._parameters[required_parameter]
else:
parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
parameters.append(parameter)
if not self._analyze_stuff():
sys.exit(1)
if self._config.get('dryrun', False):
logging.info('Generating change set')
set_id = self._generate_change_set(parameters)
if set_id:
self._describe_change_set(set_id)
logging.info('This was a dryrun')
sys.exit(0)
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
if self._updateStack:
stack = self._cloudFormation.update_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
else:
stack = self._cloudFormation.create_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
except Exception as x:
if self._verbose:
logging.error(x, exc_info=True)
else:
logging.error(x, exc_info=False)
return False
return True
def _describe_change_set(self, set_id):
complete_states = ['CREATE_COMPLETE', 'FAILED', 'UNKNOWN']
try:
logging.info('polling change set, POLL_INTERVAL={}'.format(POLL_INTERVAL))
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
while status not in complete_states:
logging.info('current set status: {}'.format(status))
time.sleep(POLL_INTERVAL)
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
logging.info('current set status: {}'.format(status))
print('\n')
print('Change set report:')
for change in response.get('Changes', []):
print(
json.dumps(
change,
indent=2,
default=json_util.default
)
)
print('\n')
logging.info('cleaning up change set')
self._cloudFormation.delete_change_set(ChangeSetName=set_id)
return True
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return False
def _generate_change_set(self, parameters):
try:
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
set_name = 'chg{}'.format(int(time.time()))
if self._updateStack:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='UPDATE'
)
else:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='CREATE'
)
if self._verbose:
logging.info('Change set: {}'.format(
json.dumps(changes, indent=2, default=json_util.default)
))
return changes.get('Id', None)
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return None
def _render_template(self):
buf = None
try:
context = self._config.get('meta-parameters', None)
if not context:
return True
template_file = self._config.get('environment', {}).get('template', None)
path, filename = os.path.split(template_file)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(path or './')
)
buf = env.get_template(filename).render(context)
with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr', delete=False) as tmp:
tmp.write(buf)
logging.info('template rendered into {}'.format(tmp.name))
self._config['environment']['template'] = tmp.name
except Exception as wtf:
print('error: _render_template() caught {}'.format(wtf))
sys.exit(1)
return buf
def _load_template(self):
template_decoded = False
template_file = self._config.get('environment', {}).get('template', None)
self._template = None
try:
json_stuff = open(template_file)
self._template = json.load(json_stuff)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = False
logging.info('template is JSON')
else:
logging.info('template is not a valid JSON template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(json): {}'.format(x))
logging.info('template is not JSON')
if not template_decoded:
try:
yaml.add_multi_constructor('', default_ctor, Loader=Loader)
with open(template_file, 'r') as f:
self._template = yaml.load(f, Loader=Loader)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = True
logging.info('template is YAML')
else:
logging.info('template is not a valid YAML template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(yaml): {}'.format(x))
logging.info('template is not YAML')
return template_decoded
def list(self):
"""
List the existing stacks in the indicated region
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if 'StackSummaries' in response:
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if stack_status != 'DELETE_COMPLETE':
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token)
else:
interested = False
return True
def smash(self):
"""
Smash the given stack
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_smash()
try:
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
logging.debug('smash pre-flight returned: {}'.format(
json.dumps(response,
indent=4,
default=json_util.default
)))
except ClientError as wtf:
logging.warning('your stack is in another castle [0].')
return False
except Exception as wtf:
logging.error('failed to find intial status of smash candidate: {}'.format(wtf))
return False
response = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))
return self.poll_stack()
def _init_boto3_clients(self):
"""
The utililty requires boto3 clients to Cloud Formation and S3. Here is
where we make them.
Args:
None
Returns:
Good or Bad; True or False
"""
try:
profile = self._config.get('environment', {}).get('profile')
region = self._config.get('environment', {}).get('region')
if profile:
self._b3Sess = boto3.session.Session(profile_name=profile)
else:
self._b3Sess = boto3.session.Session()
self._s3 = self._b3Sess.client('s3')
self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
self._ssm = self._b3Sess.client('ssm', region_name=region)
return True
except Exception as wtf:
logging.error('Exception caught in intialize_session(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _fill_defaults(self):
try:
parms = self._template['Parameters']
for key in parms:
key = str(key)
if 'Default' in parms[key] and key not in self._parameters:
self._parameters[key] = str(parms[key]['Default'])
except Exception as wtf:
logging.error('Exception caught in fill_defaults(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
return True
def _get_ssm_parameter(self, p):
"""
Get parameters from Simple Systems Manager
Args:
p - a parameter name
Returns:
a value, decrypted if needed, if successful or None if things go
sideways.
"""
try:
response = self._ssm.get_parameter(Name=p, WithDecryption=True)
return response.get('Parameter', {}).get('Value', None)
except Exception as ruh_roh:
logging.error(ruh_roh, exc_info=False)
return None
def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True
def _read_tags(self):
"""
Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
Figure what could go wrong and at least acknowledge the
the fact that Murphy was an optimist.
"""
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True
def _set_update(self):
"""
Determine if we are creating a new stack or updating and existing one.
The update member is set as you would expect at the end of this query.
Args:
None
Returns:
True
"""
try:
self._updateStack = False
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
if stack['StackStatus'] == 'ROLLBACK_COMPLETE':
logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')
del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))
stack_delete = self.poll_stack()
if not stack_delete:
return False
if stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']:
self._updateStack = True
except:
self._updateStack = False
logging.info('update_stack: ' + str(self._updateStack))
return True
def _archive_elements(self):
"""
Cloud Formation likes to take the template from S3 so here we put the
template into S3. We also store the parameters file that was used in
this run. Note: you can pass anything as the version string but you
should at least consider a version control tag or git commit hash as
the version.
Args:
None
Returns:
True if the stuff lands in S3 or False if the file doesn't
really exist or the upload goes sideways.
"""
try:
stackfile_key, propertyfile_key = self._craft_s3_keys()
template_file = self._config.get('environment', {}).get('template', None)
bucket = self._config.get('environment', {}).get('bucket', None)
if not os.path.isfile(template_file):
logging.info("{} is not actually a file".format(template_file))
return False
logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key))
temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
with open(temp_file_name, 'w') as dump_file:
json.dump(self._parameters, dump_file, indent=4)
self._s3.upload_file(temp_file_name, bucket, propertyfile_key)
logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key))
self._s3.upload_file(template_file, bucket, stackfile_key)
self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key)
logging.info("template_url: " + self._templateUrl)
return True
except Exception as x:
logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))
traceback.print_exc(file=sys.stdout)
return False
def poll_stack(self):
"""
Spin in a loop while the Cloud Formation process either fails or succeeds
Args:
None
Returns:
Good or bad; True or False
"""
logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
time.sleep(POLL_INTERVAL)
completed_states = [
'CREATE_COMPLETE',
'UPDATE_COMPLETE',
'DELETE_COMPLETE'
]
stack_name = self._config.get('environment', {}).get('stack_name', None)
while True:
try:
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
current_status = stack['StackStatus']
logging.info('current status of {}: {}'.format(stack_name, current_status))
if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
if current_status in completed_states:
return True
else:
return False
time.sleep(POLL_INTERVAL)
except ClientError as wtf:
if str(wtf).find('does not exist') == -1:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
else:
logging.info('{} is gone'.format(stack_name))
return True
except Exception as wtf:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _initialize_list(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _initialize_smash(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _validate_ini_data(self):
if 'stack_name' not in self._config.get('environment', {}):
return False
elif 'bucket' not in self._config.get('environment', {}):
return False
elif 'template' not in self._config.get('environment', {}):
return False
else:
template_file = self._config.get('environment', {}).get('template', None)
if os.path.isfile(template_file):
return True
else:
logging.error('template file \'{}\' does not exist, I give up!'.format(template_file))
return False
def _initialize_upsert(self):
if not self._validate_ini_data():
logging.error('INI file missing required bits; bucket and/or template and/or stack_name')
raise SystemError
elif not self._render_template():
logging.error('template rendering failed')
raise SystemError
elif not self._load_template():
logging.error('template initialization was not good')
raise SystemError
elif not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
elif not self._fill_parameters():
logging.error('parameter setup was not good')
raise SystemError
elif not self._read_tags():
logging.error('tags initialization was not good')
raise SystemError
elif not self._archive_elements():
logging.error('saving stuff to S3 did not go well')
raise SystemError
elif not self._set_update():
logging.error('there was a problem determining update or create')
raise SystemError
def _analyze_stuff(self):
template_scanner = self._config.get('analysis', {}).get('template', None)
tags_scanner = self._config.get('analysis', {}).get('tags', None)
if template_scanner or tags_scanner:
r = self._externally_analyze_stuff(template_scanner, tags_scanner)
if not r:
return False
wrk = self._config.get('analysis', {}).get('enforced', 'crap').lower()
rule_exceptions = self._config.get('analysis', {}).get('exceptions', None)
if wrk == 'true' or wrk == 'false':
enforced = wrk == 'true'
self._internally_analyze_stuff(enforced, rule_exceptions)
return True
def _externally_analyze_stuff(self, template_scanner, tags_scanner):
scans_executed = False
tags_scan_status = 0
template_scan_status = 0
the_data = None
try:
if template_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
template_scan_status = answer.get('exit_status', -2)
print('\nTemplate scan:')
print(json.dumps(answer, indent=2))
if tags_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
tags_scan_status = answer.get('exit_status', -2)
print('\nTag scan:')
print(json.dumps(answer, indent=2))
if not scans_executed:
return True
elif template_scan_status == 0 and tags_scan_status == 0:
print('All scans successful')
return True
else:
print('Failed scans')
return False
except Exception as wtf:
print('')
logging.info('template_scanner: {}'.format(template_scanner))
logging.info(' tags_scanner: {}'.format(tags_scanner))
print('')
logging.error('Exception caught in analyze_stuff(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _internally_analyze_stuff(self, enforced, rule_exceptions):
try:
config_dict = {}
config_dict['template_file'] = self._config['environment']['template']
validator = ValidateUtility(config_dict)
_results = validator.validate()
results = json.loads(_results)
for result in results:
try:
error_count = int(result.get('failure_count', 0))
except Exception as strangeness:
logging.warn('internally_analyze_stuff() strangeness: {}'.format(strangeness))
error_count = -1
if enforced:
traceback.print_exc(file=sys.stdout)
sys.exit(1)
if error_count == 0:
logging.info('CloudFormation Validator found zero errors')
elif error_count == 1:
if enforced:
logging.error('CloudFormation Validator found one error')
sys.exit(1)
else:
logging.warn('CloudFormation Validator found one error')
elif error_count > 1:
if enforced:
logging.error(
'CloudFormation Validator found {} errors'.format(error_count)
)
sys.exit(1)
else:
logging.warn(
'CloudFormation Validator found {} errors'.format(error_count)
)
except Exception as ruh_roh_shaggy:
logging.error('internally_analyze_stuff() exploded: {}'.format(ruh_roh_shaggy))
traceback.print_exc(file=sys.stdout)
if enforced:
sys.exit(1)
return True
def get_cloud_formation_client(self):
return self._cloudFormation
|
muckamuck/stackility | stackility/CloudStackUtility.py | CloudStackUtility.poll_stack | python | def poll_stack(self):
logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
time.sleep(POLL_INTERVAL)
completed_states = [
'CREATE_COMPLETE',
'UPDATE_COMPLETE',
'DELETE_COMPLETE'
]
stack_name = self._config.get('environment', {}).get('stack_name', None)
while True:
try:
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
current_status = stack['StackStatus']
logging.info('current status of {}: {}'.format(stack_name, current_status))
if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
if current_status in completed_states:
return True
else:
return False
time.sleep(POLL_INTERVAL)
except ClientError as wtf:
if str(wtf).find('does not exist') == -1:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
else:
logging.info('{} is gone'.format(stack_name))
return True
except Exception as wtf:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False | Spin in a loop while the Cloud Formation process either fails or succeeds
Args:
None
Returns:
Good or bad; True or False | train | https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L638-L680 | null | class CloudStackUtility:
"""
Cloud stack utility is yet another tool create AWS Cloudformation stacks.
"""
ASK = '[ask]'
SSM = '[ssm:'
_verbose = False
_template = None
_b3Sess = None
_cloudFormation = None
_config = None
_parameters = {}
_stackParameters = []
_s3 = None
_ssm = None
_tags = []
_templateUrl = None
_updateStack = False
_yaml = False
def __init__(self, config_block):
"""
Cloud stack utility init method.
Args:
config_block - a dictionary creates from the CLI driver. See that
script for the things that are required and
optional.
Returns:
not a damn thing
Raises:
SystemError - if everything isn't just right
"""
if config_block:
self._config = config_block
else:
logging.error('config block was garbage')
raise SystemError
def upsert(self):
"""
The main event of the utility. Create or update a Cloud Formation
stack. Injecting properties where needed
Args:
None
Returns:
True if the stack create/update is started successfully else
False if the start goes off in the weeds.
Exits:
If the user asked for a dryrun exit(with a code 0) the thing here. There is no
point continuing after that point.
"""
required_parameters = []
self._stackParameters = []
try:
self._initialize_upsert()
except Exception:
return False
try:
available_parameters = self._parameters.keys()
for parameter_name in self._template.get('Parameters', {}):
required_parameters.append(str(parameter_name))
logging.info(' required parameters: ' + str(required_parameters))
logging.info('available parameters: ' + str(available_parameters))
parameters = []
for required_parameter in required_parameters:
parameter = {}
parameter['ParameterKey'] = str(required_parameter)
required_parameter = str(required_parameter)
if required_parameter in self._parameters:
parameter['ParameterValue'] = self._parameters[required_parameter]
else:
parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
parameters.append(parameter)
if not self._analyze_stuff():
sys.exit(1)
if self._config.get('dryrun', False):
logging.info('Generating change set')
set_id = self._generate_change_set(parameters)
if set_id:
self._describe_change_set(set_id)
logging.info('This was a dryrun')
sys.exit(0)
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
if self._updateStack:
stack = self._cloudFormation.update_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
else:
stack = self._cloudFormation.create_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
except Exception as x:
if self._verbose:
logging.error(x, exc_info=True)
else:
logging.error(x, exc_info=False)
return False
return True
def _describe_change_set(self, set_id):
complete_states = ['CREATE_COMPLETE', 'FAILED', 'UNKNOWN']
try:
logging.info('polling change set, POLL_INTERVAL={}'.format(POLL_INTERVAL))
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
while status not in complete_states:
logging.info('current set status: {}'.format(status))
time.sleep(POLL_INTERVAL)
response = self._cloudFormation.describe_change_set(ChangeSetName=set_id)
status = response.get('Status', 'UNKNOWN')
logging.info('current set status: {}'.format(status))
print('\n')
print('Change set report:')
for change in response.get('Changes', []):
print(
json.dumps(
change,
indent=2,
default=json_util.default
)
)
print('\n')
logging.info('cleaning up change set')
self._cloudFormation.delete_change_set(ChangeSetName=set_id)
return True
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return False
def _generate_change_set(self, parameters):
try:
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
set_name = 'chg{}'.format(int(time.time()))
if self._updateStack:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='UPDATE'
)
else:
changes = self._cloudFormation.create_change_set(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ChangeSetName=set_name,
ChangeSetType='CREATE'
)
if self._verbose:
logging.info('Change set: {}'.format(
json.dumps(changes, indent=2, default=json_util.default)
))
return changes.get('Id', None)
except Exception as ruh_roh_shaggy:
if self._verbose:
logging.error(ruh_roh_shaggy, exc_info=True)
else:
logging.error(ruh_roh_shaggy, exc_info=False)
return None
def _render_template(self):
buf = None
try:
context = self._config.get('meta-parameters', None)
if not context:
return True
template_file = self._config.get('environment', {}).get('template', None)
path, filename = os.path.split(template_file)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(path or './')
)
buf = env.get_template(filename).render(context)
with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr', delete=False) as tmp:
tmp.write(buf)
logging.info('template rendered into {}'.format(tmp.name))
self._config['environment']['template'] = tmp.name
except Exception as wtf:
print('error: _render_template() caught {}'.format(wtf))
sys.exit(1)
return buf
def _load_template(self):
template_decoded = False
template_file = self._config.get('environment', {}).get('template', None)
self._template = None
try:
json_stuff = open(template_file)
self._template = json.load(json_stuff)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = False
logging.info('template is JSON')
else:
logging.info('template is not a valid JSON template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(json): {}'.format(x))
logging.info('template is not JSON')
if not template_decoded:
try:
yaml.add_multi_constructor('', default_ctor, Loader=Loader)
with open(template_file, 'r') as f:
self._template = yaml.load(f, Loader=Loader)
if self._template and 'Resources' in self._template:
template_decoded = True
self._yaml = True
logging.info('template is YAML')
else:
logging.info('template is not a valid YAML template')
except Exception as x:
template_decoded = False
logging.debug('Exception caught in load_template(yaml): {}'.format(x))
logging.info('template is not YAML')
return template_decoded
def list(self):
"""
List the existing stacks in the indicated region
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if 'StackSummaries' in response:
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if stack_status != 'DELETE_COMPLETE':
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token)
else:
interested = False
return True
def smash(self):
"""
Smash the given stack
Args:
None
Returns:
True if True
Todo:
Figure out what could go wrong and take steps
to hanlde problems.
"""
self._initialize_smash()
try:
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
logging.debug('smash pre-flight returned: {}'.format(
json.dumps(response,
indent=4,
default=json_util.default
)))
except ClientError as wtf:
logging.warning('your stack is in another castle [0].')
return False
except Exception as wtf:
logging.error('failed to find intial status of smash candidate: {}'.format(wtf))
return False
response = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))
return self.poll_stack()
def _init_boto3_clients(self):
"""
The utililty requires boto3 clients to Cloud Formation and S3. Here is
where we make them.
Args:
None
Returns:
Good or Bad; True or False
"""
try:
profile = self._config.get('environment', {}).get('profile')
region = self._config.get('environment', {}).get('region')
if profile:
self._b3Sess = boto3.session.Session(profile_name=profile)
else:
self._b3Sess = boto3.session.Session()
self._s3 = self._b3Sess.client('s3')
self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
self._ssm = self._b3Sess.client('ssm', region_name=region)
return True
except Exception as wtf:
logging.error('Exception caught in intialize_session(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _fill_defaults(self):
try:
parms = self._template['Parameters']
for key in parms:
key = str(key)
if 'Default' in parms[key] and key not in self._parameters:
self._parameters[key] = str(parms[key]['Default'])
except Exception as wtf:
logging.error('Exception caught in fill_defaults(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
return True
def _get_ssm_parameter(self, p):
"""
Get parameters from Simple Systems Manager
Args:
p - a parameter name
Returns:
a value, decrypted if needed, if successful or None if things go
sideways.
"""
try:
response = self._ssm.get_parameter(Name=p, WithDecryption=True)
return response.get('Parameter', {}).get('Value', None)
except Exception as ruh_roh:
logging.error(ruh_roh, exc_info=False)
return None
def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True
def _read_tags(self):
"""
Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
Figure what could go wrong and at least acknowledge the
the fact that Murphy was an optimist.
"""
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True
def _set_update(self):
"""
Determine if we are creating a new stack or updating and existing one.
The update member is set as you would expect at the end of this query.
Args:
None
Returns:
True
"""
try:
self._updateStack = False
stack_name = self._config.get('environment', {}).get('stack_name', None)
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
if stack['StackStatus'] == 'ROLLBACK_COMPLETE':
logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')
del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)
logging.info('delete started for stack: {}'.format(stack_name))
logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))
stack_delete = self.poll_stack()
if not stack_delete:
return False
if stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']:
self._updateStack = True
except:
self._updateStack = False
logging.info('update_stack: ' + str(self._updateStack))
return True
def _archive_elements(self):
"""
Cloud Formation likes to take the template from S3 so here we put the
template into S3. We also store the parameters file that was used in
this run. Note: you can pass anything as the version string but you
should at least consider a version control tag or git commit hash as
the version.
Args:
None
Returns:
True if the stuff lands in S3 or False if the file doesn't
really exist or the upload goes sideways.
"""
try:
stackfile_key, propertyfile_key = self._craft_s3_keys()
template_file = self._config.get('environment', {}).get('template', None)
bucket = self._config.get('environment', {}).get('bucket', None)
if not os.path.isfile(template_file):
logging.info("{} is not actually a file".format(template_file))
return False
logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key))
temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
with open(temp_file_name, 'w') as dump_file:
json.dump(self._parameters, dump_file, indent=4)
self._s3.upload_file(temp_file_name, bucket, propertyfile_key)
logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key))
self._s3.upload_file(template_file, bucket, stackfile_key)
self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key)
logging.info("template_url: " + self._templateUrl)
return True
except Exception as x:
logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))
traceback.print_exc(file=sys.stdout)
return False
def _craft_s3_keys(self):
"""
We are putting stuff into S3, were supplied the bucket. Here we
craft the key of the elements we are putting up there in the
internet clouds.
Args:
None
Returns:
a tuple of teplate file key and property file key
"""
now = time.gmtime()
stub = "templates/{stack_name}/{version}".format(
stack_name=self._config.get('environment', {}).get('stack_name', None),
version=self._config.get('codeVersion')
)
stub = stub + "/" + str(now.tm_year)
stub = stub + "/" + str('%02d' % now.tm_mon)
stub = stub + "/" + str('%02d' % now.tm_mday)
stub = stub + "/" + str('%02d' % now.tm_hour)
stub = stub + ":" + str('%02d' % now.tm_min)
stub = stub + ":" + str('%02d' % now.tm_sec)
if self._yaml:
template_key = stub + "/stack.yaml"
else:
template_key = stub + "/stack.json"
property_key = stub + "/stack.properties"
return template_key, property_key
def _initialize_list(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _initialize_smash(self):
if not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
def _validate_ini_data(self):
if 'stack_name' not in self._config.get('environment', {}):
return False
elif 'bucket' not in self._config.get('environment', {}):
return False
elif 'template' not in self._config.get('environment', {}):
return False
else:
template_file = self._config.get('environment', {}).get('template', None)
if os.path.isfile(template_file):
return True
else:
logging.error('template file \'{}\' does not exist, I give up!'.format(template_file))
return False
def _initialize_upsert(self):
if not self._validate_ini_data():
logging.error('INI file missing required bits; bucket and/or template and/or stack_name')
raise SystemError
elif not self._render_template():
logging.error('template rendering failed')
raise SystemError
elif not self._load_template():
logging.error('template initialization was not good')
raise SystemError
elif not self._init_boto3_clients():
logging.error('session initialization was not good')
raise SystemError
elif not self._fill_parameters():
logging.error('parameter setup was not good')
raise SystemError
elif not self._read_tags():
logging.error('tags initialization was not good')
raise SystemError
elif not self._archive_elements():
logging.error('saving stuff to S3 did not go well')
raise SystemError
elif not self._set_update():
logging.error('there was a problem determining update or create')
raise SystemError
def _analyze_stuff(self):
template_scanner = self._config.get('analysis', {}).get('template', None)
tags_scanner = self._config.get('analysis', {}).get('tags', None)
if template_scanner or tags_scanner:
r = self._externally_analyze_stuff(template_scanner, tags_scanner)
if not r:
return False
wrk = self._config.get('analysis', {}).get('enforced', 'crap').lower()
rule_exceptions = self._config.get('analysis', {}).get('exceptions', None)
if wrk == 'true' or wrk == 'false':
enforced = wrk == 'true'
self._internally_analyze_stuff(enforced, rule_exceptions)
return True
def _externally_analyze_stuff(self, template_scanner, tags_scanner):
scans_executed = False
tags_scan_status = 0
template_scan_status = 0
the_data = None
try:
if template_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
template_scan_status = answer.get('exit_status', -2)
print('\nTemplate scan:')
print(json.dumps(answer, indent=2))
if tags_scanner:
scans_executed = True
with open(self._config['environment']['template'], 'rb') as template_data:
the_data = template_data.read()
r = requests.post(template_scanner, data=the_data)
answer = json.loads(r.content)
tags_scan_status = answer.get('exit_status', -2)
print('\nTag scan:')
print(json.dumps(answer, indent=2))
if not scans_executed:
return True
elif template_scan_status == 0 and tags_scan_status == 0:
print('All scans successful')
return True
else:
print('Failed scans')
return False
except Exception as wtf:
print('')
logging.info('template_scanner: {}'.format(template_scanner))
logging.info(' tags_scanner: {}'.format(tags_scanner))
print('')
logging.error('Exception caught in analyze_stuff(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
def _internally_analyze_stuff(self, enforced, rule_exceptions):
try:
config_dict = {}
config_dict['template_file'] = self._config['environment']['template']
validator = ValidateUtility(config_dict)
_results = validator.validate()
results = json.loads(_results)
for result in results:
try:
error_count = int(result.get('failure_count', 0))
except Exception as strangeness:
logging.warn('internally_analyze_stuff() strangeness: {}'.format(strangeness))
error_count = -1
if enforced:
traceback.print_exc(file=sys.stdout)
sys.exit(1)
if error_count == 0:
logging.info('CloudFormation Validator found zero errors')
elif error_count == 1:
if enforced:
logging.error('CloudFormation Validator found one error')
sys.exit(1)
else:
logging.warn('CloudFormation Validator found one error')
elif error_count > 1:
if enforced:
logging.error(
'CloudFormation Validator found {} errors'.format(error_count)
)
sys.exit(1)
else:
logging.warn(
'CloudFormation Validator found {} errors'.format(error_count)
)
except Exception as ruh_roh_shaggy:
logging.error('internally_analyze_stuff() exploded: {}'.format(ruh_roh_shaggy))
traceback.print_exc(file=sys.stdout)
if enforced:
sys.exit(1)
return True
def get_cloud_formation_client(self):
return self._cloudFormation
|
mozillazg/python-shanbay | shanbay/message.py | Message.send_message | python | def send_message(self, recipient_list, subject, body):
url = 'http://www.shanbay.com/api/v1/message/'
recipient = ','.join(recipient_list)
data = {
'recipient': recipient,
'subject': subject,
'body': body,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
response = self.request(url, 'post', data=data)
return response.ok | 发送站内消息
:param recipient_list: 收件人列表
:param subject: 标题
:param body: 内容(不能超过 1024 个字符) | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/message.py#L33-L49 | [
"def request(self, url, method='get', **kwargs):\n headers = kwargs.setdefault('headers', {})\n headers.setdefault('User-Agent', self._attr('USER_AGENT'))\n headers.setdefault('X-CSRFToken', self.csrftoken)\n headers.setdefault('X-Requested-With', 'XMLHttpRequest')\n try:\n r = getattr(self._r... | class Message(object):
"""站内消息
:param shanbay: :class:`~shanbay.Shanbay` 实例对象
::
>>> from shanbay import Shanbay, Message
>>> s = Shanbay('username', 'password')
>>> s.login()
>>> m = Message(s)
"""
def __init__(self, shanbay):
""" ::
from shanbay import Shanbay, Message
s = Shanbay('username', 'password')
s.login()
m = Message(s)
"""
self.shanbay = shanbay
self._request = shanbay._request
self.request = shanbay.request
def reply_message(self, message_url, body):
"""回复某条站内消息
:param message_url: 该条消息的页面 URL
:param body: 内容(不能超过 1024 个字符)
"""
id = re.findall(r'(\d+)/?$', message_url)[0]
api = 'http://www.shanbay.com/api/v1/message/%s/reply/'
url = api % id
data = {
'body': body
}
response = self.request(url, 'post', data=data)
return response.json()['status_code'] == 0
|
mozillazg/python-shanbay | shanbay/message.py | Message.reply_message | python | def reply_message(self, message_url, body):
id = re.findall(r'(\d+)/?$', message_url)[0]
api = 'http://www.shanbay.com/api/v1/message/%s/reply/'
url = api % id
data = {
'body': body
}
response = self.request(url, 'post', data=data)
return response.json()['status_code'] == 0 | 回复某条站内消息
:param message_url: 该条消息的页面 URL
:param body: 内容(不能超过 1024 个字符) | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/message.py#L51-L64 | [
"def request(self, url, method='get', **kwargs):\n headers = kwargs.setdefault('headers', {})\n headers.setdefault('User-Agent', self._attr('USER_AGENT'))\n headers.setdefault('X-CSRFToken', self.csrftoken)\n headers.setdefault('X-Requested-With', 'XMLHttpRequest')\n try:\n r = getattr(self._r... | class Message(object):
"""站内消息
:param shanbay: :class:`~shanbay.Shanbay` 实例对象
::
>>> from shanbay import Shanbay, Message
>>> s = Shanbay('username', 'password')
>>> s.login()
>>> m = Message(s)
"""
def __init__(self, shanbay):
""" ::
from shanbay import Shanbay, Message
s = Shanbay('username', 'password')
s.login()
m = Message(s)
"""
self.shanbay = shanbay
self._request = shanbay._request
self.request = shanbay.request
def send_message(self, recipient_list, subject, body):
"""发送站内消息
:param recipient_list: 收件人列表
:param subject: 标题
:param body: 内容(不能超过 1024 个字符)
"""
url = 'http://www.shanbay.com/api/v1/message/'
recipient = ','.join(recipient_list)
data = {
'recipient': recipient,
'subject': subject,
'body': body,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
response = self.request(url, 'post', data=data)
return response.ok
|
mozillazg/python-shanbay | shanbay/api.py | API.word | python | def word(self, word, url='https://api.shanbay.com/bdc/search/'):
params = {
'word': word
}
return self._request(url, params=params).json() | 查询单词 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/api.py#L50-L55 | [
"def _request(self, url, method='get', params=None, data=None):\n logger.debug('method: {0}'.format(method))\n logger.debug('params: {0}'.format(params))\n logger.debug('data: {0}'.format(data))\n kwargs = {}\n if method in ('get', 'delete'):\n kwargs['params'] = params\n elif method in ('p... | class API(object):
def __init__(self, client_id, token):
self.api = OAuth2Session(client_id, token=token)
def _request(self, url, method='get', params=None, data=None):
logger.debug('method: {0}'.format(method))
logger.debug('params: {0}'.format(params))
logger.debug('data: {0}'.format(data))
kwargs = {}
if method in ('get', 'delete'):
kwargs['params'] = params
elif method in ('post',):
kwargs['data'] = data
logger.debug('kwargs: {0}'.format(kwargs))
return getattr(self.api, method)(url, **kwargs)
@_catch_token_error
def user(self, url='https://api.shanbay.com/account/'):
"""获取用户信息"""
return self._request(url).json()
@_catch_token_error
@_catch_token_error
def add_word(self, word_id, url='https://api.shanbay.com/bdc/learning/'):
"""添加单词"""
data = {
'id': word_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def examples(self, word_id, type=None,
url='https://api.shanbay.com/bdc/example/'):
"""获取单词的例句"""
params = {
'vocabulary_id': word_id
}
if type is not None:
params['type'] = type
return self._request(url, params=params).json()
@_catch_token_error
def add_example(self, word_id, original, translation,
url='https://api.shanbay.com/bdc/example/'):
"""创建例句"""
data = {
'vocabulary': word_id,
'original': original,
'translation': translation
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_example(self, example_id,
url='https://api.shanbay.com/bdc/learning_example/'):
"""收藏例句"""
data = {
'example_id': example_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_example(self, example_id,
url='https://api.shanbay.com/bdc/example/{example_id}/'):
"""删除例句"""
url = url.format(example_id=example_id)
return self._request(url, method='delete').json()
@_catch_token_error
def notes(self, word_id, url='https://api.shanbay.com/bdc/note/'):
"""获取笔记"""
params = {
'vocabulary_id': word_id
}
return self._request(url, params=params).json()
@_catch_token_error
def add_note(self, word_id, note,
url='https://api.shanbay.com/bdc/note/'):
"""创建笔记"""
data = {
'vocabulary': word_id,
'note': note
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_note(self, note_id,
url='https://api.shanbay.com/bdc/learning_note/'):
"""收藏笔记"""
data = {
'note_id': note_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_note(self, note_id,
url='https://api.shanbay.com/bdc/note/{note_id}/'):
"""删除笔记"""
url = url.format(note_id=note_id)
return self._request(url, method='delete').json()
|
mozillazg/python-shanbay | shanbay/api.py | API.add_word | python | def add_word(self, word_id, url='https://api.shanbay.com/bdc/learning/'):
data = {
'id': word_id
}
return self._request(url, method='post', data=data).json() | 添加单词 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/api.py#L58-L63 | [
"def _request(self, url, method='get', params=None, data=None):\n logger.debug('method: {0}'.format(method))\n logger.debug('params: {0}'.format(params))\n logger.debug('data: {0}'.format(data))\n kwargs = {}\n if method in ('get', 'delete'):\n kwargs['params'] = params\n elif method in ('p... | class API(object):
def __init__(self, client_id, token):
self.api = OAuth2Session(client_id, token=token)
def _request(self, url, method='get', params=None, data=None):
logger.debug('method: {0}'.format(method))
logger.debug('params: {0}'.format(params))
logger.debug('data: {0}'.format(data))
kwargs = {}
if method in ('get', 'delete'):
kwargs['params'] = params
elif method in ('post',):
kwargs['data'] = data
logger.debug('kwargs: {0}'.format(kwargs))
return getattr(self.api, method)(url, **kwargs)
@_catch_token_error
def user(self, url='https://api.shanbay.com/account/'):
"""获取用户信息"""
return self._request(url).json()
@_catch_token_error
def word(self, word, url='https://api.shanbay.com/bdc/search/'):
"""查询单词"""
params = {
'word': word
}
return self._request(url, params=params).json()
@_catch_token_error
@_catch_token_error
def examples(self, word_id, type=None,
url='https://api.shanbay.com/bdc/example/'):
"""获取单词的例句"""
params = {
'vocabulary_id': word_id
}
if type is not None:
params['type'] = type
return self._request(url, params=params).json()
@_catch_token_error
def add_example(self, word_id, original, translation,
url='https://api.shanbay.com/bdc/example/'):
"""创建例句"""
data = {
'vocabulary': word_id,
'original': original,
'translation': translation
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_example(self, example_id,
url='https://api.shanbay.com/bdc/learning_example/'):
"""收藏例句"""
data = {
'example_id': example_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_example(self, example_id,
url='https://api.shanbay.com/bdc/example/{example_id}/'):
"""删除例句"""
url = url.format(example_id=example_id)
return self._request(url, method='delete').json()
@_catch_token_error
def notes(self, word_id, url='https://api.shanbay.com/bdc/note/'):
"""获取笔记"""
params = {
'vocabulary_id': word_id
}
return self._request(url, params=params).json()
@_catch_token_error
def add_note(self, word_id, note,
url='https://api.shanbay.com/bdc/note/'):
"""创建笔记"""
data = {
'vocabulary': word_id,
'note': note
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_note(self, note_id,
url='https://api.shanbay.com/bdc/learning_note/'):
"""收藏笔记"""
data = {
'note_id': note_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_note(self, note_id,
url='https://api.shanbay.com/bdc/note/{note_id}/'):
"""删除笔记"""
url = url.format(note_id=note_id)
return self._request(url, method='delete').json()
|
mozillazg/python-shanbay | shanbay/api.py | API.examples | python | def examples(self, word_id, type=None,
url='https://api.shanbay.com/bdc/example/'):
params = {
'vocabulary_id': word_id
}
if type is not None:
params['type'] = type
return self._request(url, params=params).json() | 获取单词的例句 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/api.py#L66-L74 | [
"def _request(self, url, method='get', params=None, data=None):\n logger.debug('method: {0}'.format(method))\n logger.debug('params: {0}'.format(params))\n logger.debug('data: {0}'.format(data))\n kwargs = {}\n if method in ('get', 'delete'):\n kwargs['params'] = params\n elif method in ('p... | class API(object):
def __init__(self, client_id, token):
self.api = OAuth2Session(client_id, token=token)
def _request(self, url, method='get', params=None, data=None):
logger.debug('method: {0}'.format(method))
logger.debug('params: {0}'.format(params))
logger.debug('data: {0}'.format(data))
kwargs = {}
if method in ('get', 'delete'):
kwargs['params'] = params
elif method in ('post',):
kwargs['data'] = data
logger.debug('kwargs: {0}'.format(kwargs))
return getattr(self.api, method)(url, **kwargs)
@_catch_token_error
def user(self, url='https://api.shanbay.com/account/'):
"""获取用户信息"""
return self._request(url).json()
@_catch_token_error
def word(self, word, url='https://api.shanbay.com/bdc/search/'):
"""查询单词"""
params = {
'word': word
}
return self._request(url, params=params).json()
@_catch_token_error
def add_word(self, word_id, url='https://api.shanbay.com/bdc/learning/'):
"""添加单词"""
data = {
'id': word_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
@_catch_token_error
def add_example(self, word_id, original, translation,
url='https://api.shanbay.com/bdc/example/'):
"""创建例句"""
data = {
'vocabulary': word_id,
'original': original,
'translation': translation
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_example(self, example_id,
url='https://api.shanbay.com/bdc/learning_example/'):
"""收藏例句"""
data = {
'example_id': example_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_example(self, example_id,
url='https://api.shanbay.com/bdc/example/{example_id}/'):
"""删除例句"""
url = url.format(example_id=example_id)
return self._request(url, method='delete').json()
@_catch_token_error
def notes(self, word_id, url='https://api.shanbay.com/bdc/note/'):
"""获取笔记"""
params = {
'vocabulary_id': word_id
}
return self._request(url, params=params).json()
@_catch_token_error
def add_note(self, word_id, note,
url='https://api.shanbay.com/bdc/note/'):
"""创建笔记"""
data = {
'vocabulary': word_id,
'note': note
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_note(self, note_id,
url='https://api.shanbay.com/bdc/learning_note/'):
"""收藏笔记"""
data = {
'note_id': note_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_note(self, note_id,
url='https://api.shanbay.com/bdc/note/{note_id}/'):
"""删除笔记"""
url = url.format(note_id=note_id)
return self._request(url, method='delete').json()
|
mozillazg/python-shanbay | shanbay/api.py | API.add_example | python | def add_example(self, word_id, original, translation,
url='https://api.shanbay.com/bdc/example/'):
data = {
'vocabulary': word_id,
'original': original,
'translation': translation
}
return self._request(url, method='post', data=data).json() | 创建例句 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/api.py#L77-L85 | [
"def _request(self, url, method='get', params=None, data=None):\n logger.debug('method: {0}'.format(method))\n logger.debug('params: {0}'.format(params))\n logger.debug('data: {0}'.format(data))\n kwargs = {}\n if method in ('get', 'delete'):\n kwargs['params'] = params\n elif method in ('p... | class API(object):
def __init__(self, client_id, token):
self.api = OAuth2Session(client_id, token=token)
def _request(self, url, method='get', params=None, data=None):
logger.debug('method: {0}'.format(method))
logger.debug('params: {0}'.format(params))
logger.debug('data: {0}'.format(data))
kwargs = {}
if method in ('get', 'delete'):
kwargs['params'] = params
elif method in ('post',):
kwargs['data'] = data
logger.debug('kwargs: {0}'.format(kwargs))
return getattr(self.api, method)(url, **kwargs)
@_catch_token_error
def user(self, url='https://api.shanbay.com/account/'):
"""获取用户信息"""
return self._request(url).json()
@_catch_token_error
def word(self, word, url='https://api.shanbay.com/bdc/search/'):
"""查询单词"""
params = {
'word': word
}
return self._request(url, params=params).json()
@_catch_token_error
def add_word(self, word_id, url='https://api.shanbay.com/bdc/learning/'):
"""添加单词"""
data = {
'id': word_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def examples(self, word_id, type=None,
url='https://api.shanbay.com/bdc/example/'):
"""获取单词的例句"""
params = {
'vocabulary_id': word_id
}
if type is not None:
params['type'] = type
return self._request(url, params=params).json()
@_catch_token_error
@_catch_token_error
def favorite_example(self, example_id,
url='https://api.shanbay.com/bdc/learning_example/'):
"""收藏例句"""
data = {
'example_id': example_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_example(self, example_id,
url='https://api.shanbay.com/bdc/example/{example_id}/'):
"""删除例句"""
url = url.format(example_id=example_id)
return self._request(url, method='delete').json()
@_catch_token_error
def notes(self, word_id, url='https://api.shanbay.com/bdc/note/'):
"""获取笔记"""
params = {
'vocabulary_id': word_id
}
return self._request(url, params=params).json()
@_catch_token_error
def add_note(self, word_id, note,
url='https://api.shanbay.com/bdc/note/'):
"""创建笔记"""
data = {
'vocabulary': word_id,
'note': note
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_note(self, note_id,
url='https://api.shanbay.com/bdc/learning_note/'):
"""收藏笔记"""
data = {
'note_id': note_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_note(self, note_id,
url='https://api.shanbay.com/bdc/note/{note_id}/'):
"""删除笔记"""
url = url.format(note_id=note_id)
return self._request(url, method='delete').json()
|
mozillazg/python-shanbay | shanbay/api.py | API.favorite_example | python | def favorite_example(self, example_id,
url='https://api.shanbay.com/bdc/learning_example/'):
data = {
'example_id': example_id
}
return self._request(url, method='post', data=data).json() | 收藏例句 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/api.py#L88-L94 | [
"def _request(self, url, method='get', params=None, data=None):\n logger.debug('method: {0}'.format(method))\n logger.debug('params: {0}'.format(params))\n logger.debug('data: {0}'.format(data))\n kwargs = {}\n if method in ('get', 'delete'):\n kwargs['params'] = params\n elif method in ('p... | class API(object):
def __init__(self, client_id, token):
self.api = OAuth2Session(client_id, token=token)
def _request(self, url, method='get', params=None, data=None):
logger.debug('method: {0}'.format(method))
logger.debug('params: {0}'.format(params))
logger.debug('data: {0}'.format(data))
kwargs = {}
if method in ('get', 'delete'):
kwargs['params'] = params
elif method in ('post',):
kwargs['data'] = data
logger.debug('kwargs: {0}'.format(kwargs))
return getattr(self.api, method)(url, **kwargs)
@_catch_token_error
def user(self, url='https://api.shanbay.com/account/'):
"""获取用户信息"""
return self._request(url).json()
@_catch_token_error
def word(self, word, url='https://api.shanbay.com/bdc/search/'):
"""查询单词"""
params = {
'word': word
}
return self._request(url, params=params).json()
@_catch_token_error
def add_word(self, word_id, url='https://api.shanbay.com/bdc/learning/'):
"""添加单词"""
data = {
'id': word_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def examples(self, word_id, type=None,
url='https://api.shanbay.com/bdc/example/'):
"""获取单词的例句"""
params = {
'vocabulary_id': word_id
}
if type is not None:
params['type'] = type
return self._request(url, params=params).json()
@_catch_token_error
def add_example(self, word_id, original, translation,
url='https://api.shanbay.com/bdc/example/'):
"""创建例句"""
data = {
'vocabulary': word_id,
'original': original,
'translation': translation
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
@_catch_token_error
def delete_example(self, example_id,
url='https://api.shanbay.com/bdc/example/{example_id}/'):
"""删除例句"""
url = url.format(example_id=example_id)
return self._request(url, method='delete').json()
@_catch_token_error
def notes(self, word_id, url='https://api.shanbay.com/bdc/note/'):
"""获取笔记"""
params = {
'vocabulary_id': word_id
}
return self._request(url, params=params).json()
@_catch_token_error
def add_note(self, word_id, note,
url='https://api.shanbay.com/bdc/note/'):
"""创建笔记"""
data = {
'vocabulary': word_id,
'note': note
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_note(self, note_id,
url='https://api.shanbay.com/bdc/learning_note/'):
"""收藏笔记"""
data = {
'note_id': note_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_note(self, note_id,
url='https://api.shanbay.com/bdc/note/{note_id}/'):
"""删除笔记"""
url = url.format(note_id=note_id)
return self._request(url, method='delete').json()
|
mozillazg/python-shanbay | shanbay/api.py | API.delete_example | python | def delete_example(self, example_id,
url='https://api.shanbay.com/bdc/example/{example_id}/'):
url = url.format(example_id=example_id)
return self._request(url, method='delete').json() | 删除例句 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/api.py#L97-L101 | [
"def _request(self, url, method='get', params=None, data=None):\n logger.debug('method: {0}'.format(method))\n logger.debug('params: {0}'.format(params))\n logger.debug('data: {0}'.format(data))\n kwargs = {}\n if method in ('get', 'delete'):\n kwargs['params'] = params\n elif method in ('p... | class API(object):
def __init__(self, client_id, token):
self.api = OAuth2Session(client_id, token=token)
def _request(self, url, method='get', params=None, data=None):
logger.debug('method: {0}'.format(method))
logger.debug('params: {0}'.format(params))
logger.debug('data: {0}'.format(data))
kwargs = {}
if method in ('get', 'delete'):
kwargs['params'] = params
elif method in ('post',):
kwargs['data'] = data
logger.debug('kwargs: {0}'.format(kwargs))
return getattr(self.api, method)(url, **kwargs)
@_catch_token_error
def user(self, url='https://api.shanbay.com/account/'):
"""获取用户信息"""
return self._request(url).json()
@_catch_token_error
def word(self, word, url='https://api.shanbay.com/bdc/search/'):
"""查询单词"""
params = {
'word': word
}
return self._request(url, params=params).json()
@_catch_token_error
def add_word(self, word_id, url='https://api.shanbay.com/bdc/learning/'):
"""添加单词"""
data = {
'id': word_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def examples(self, word_id, type=None,
url='https://api.shanbay.com/bdc/example/'):
"""获取单词的例句"""
params = {
'vocabulary_id': word_id
}
if type is not None:
params['type'] = type
return self._request(url, params=params).json()
@_catch_token_error
def add_example(self, word_id, original, translation,
url='https://api.shanbay.com/bdc/example/'):
"""创建例句"""
data = {
'vocabulary': word_id,
'original': original,
'translation': translation
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_example(self, example_id,
url='https://api.shanbay.com/bdc/learning_example/'):
"""收藏例句"""
data = {
'example_id': example_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
@_catch_token_error
def notes(self, word_id, url='https://api.shanbay.com/bdc/note/'):
"""获取笔记"""
params = {
'vocabulary_id': word_id
}
return self._request(url, params=params).json()
@_catch_token_error
def add_note(self, word_id, note,
url='https://api.shanbay.com/bdc/note/'):
"""创建笔记"""
data = {
'vocabulary': word_id,
'note': note
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_note(self, note_id,
url='https://api.shanbay.com/bdc/learning_note/'):
"""收藏笔记"""
data = {
'note_id': note_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_note(self, note_id,
url='https://api.shanbay.com/bdc/note/{note_id}/'):
"""删除笔记"""
url = url.format(note_id=note_id)
return self._request(url, method='delete').json()
|
mozillazg/python-shanbay | shanbay/api.py | API.notes | python | def notes(self, word_id, url='https://api.shanbay.com/bdc/note/'):
params = {
'vocabulary_id': word_id
}
return self._request(url, params=params).json() | 获取笔记 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/api.py#L104-L109 | [
"def _request(self, url, method='get', params=None, data=None):\n logger.debug('method: {0}'.format(method))\n logger.debug('params: {0}'.format(params))\n logger.debug('data: {0}'.format(data))\n kwargs = {}\n if method in ('get', 'delete'):\n kwargs['params'] = params\n elif method in ('p... | class API(object):
def __init__(self, client_id, token):
self.api = OAuth2Session(client_id, token=token)
def _request(self, url, method='get', params=None, data=None):
logger.debug('method: {0}'.format(method))
logger.debug('params: {0}'.format(params))
logger.debug('data: {0}'.format(data))
kwargs = {}
if method in ('get', 'delete'):
kwargs['params'] = params
elif method in ('post',):
kwargs['data'] = data
logger.debug('kwargs: {0}'.format(kwargs))
return getattr(self.api, method)(url, **kwargs)
@_catch_token_error
def user(self, url='https://api.shanbay.com/account/'):
"""获取用户信息"""
return self._request(url).json()
@_catch_token_error
def word(self, word, url='https://api.shanbay.com/bdc/search/'):
"""查询单词"""
params = {
'word': word
}
return self._request(url, params=params).json()
@_catch_token_error
def add_word(self, word_id, url='https://api.shanbay.com/bdc/learning/'):
"""添加单词"""
data = {
'id': word_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def examples(self, word_id, type=None,
url='https://api.shanbay.com/bdc/example/'):
"""获取单词的例句"""
params = {
'vocabulary_id': word_id
}
if type is not None:
params['type'] = type
return self._request(url, params=params).json()
@_catch_token_error
def add_example(self, word_id, original, translation,
url='https://api.shanbay.com/bdc/example/'):
"""创建例句"""
data = {
'vocabulary': word_id,
'original': original,
'translation': translation
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_example(self, example_id,
url='https://api.shanbay.com/bdc/learning_example/'):
"""收藏例句"""
data = {
'example_id': example_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_example(self, example_id,
url='https://api.shanbay.com/bdc/example/{example_id}/'):
"""删除例句"""
url = url.format(example_id=example_id)
return self._request(url, method='delete').json()
@_catch_token_error
@_catch_token_error
def add_note(self, word_id, note,
url='https://api.shanbay.com/bdc/note/'):
"""创建笔记"""
data = {
'vocabulary': word_id,
'note': note
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_note(self, note_id,
url='https://api.shanbay.com/bdc/learning_note/'):
"""收藏笔记"""
data = {
'note_id': note_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_note(self, note_id,
url='https://api.shanbay.com/bdc/note/{note_id}/'):
"""删除笔记"""
url = url.format(note_id=note_id)
return self._request(url, method='delete').json()
|
mozillazg/python-shanbay | shanbay/api.py | API.add_note | python | def add_note(self, word_id, note,
url='https://api.shanbay.com/bdc/note/'):
data = {
'vocabulary': word_id,
'note': note
}
return self._request(url, method='post', data=data).json() | 创建笔记 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/api.py#L112-L119 | [
"def _request(self, url, method='get', params=None, data=None):\n logger.debug('method: {0}'.format(method))\n logger.debug('params: {0}'.format(params))\n logger.debug('data: {0}'.format(data))\n kwargs = {}\n if method in ('get', 'delete'):\n kwargs['params'] = params\n elif method in ('p... | class API(object):
def __init__(self, client_id, token):
self.api = OAuth2Session(client_id, token=token)
def _request(self, url, method='get', params=None, data=None):
logger.debug('method: {0}'.format(method))
logger.debug('params: {0}'.format(params))
logger.debug('data: {0}'.format(data))
kwargs = {}
if method in ('get', 'delete'):
kwargs['params'] = params
elif method in ('post',):
kwargs['data'] = data
logger.debug('kwargs: {0}'.format(kwargs))
return getattr(self.api, method)(url, **kwargs)
@_catch_token_error
def user(self, url='https://api.shanbay.com/account/'):
"""获取用户信息"""
return self._request(url).json()
@_catch_token_error
def word(self, word, url='https://api.shanbay.com/bdc/search/'):
"""查询单词"""
params = {
'word': word
}
return self._request(url, params=params).json()
@_catch_token_error
def add_word(self, word_id, url='https://api.shanbay.com/bdc/learning/'):
"""添加单词"""
data = {
'id': word_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def examples(self, word_id, type=None,
url='https://api.shanbay.com/bdc/example/'):
"""获取单词的例句"""
params = {
'vocabulary_id': word_id
}
if type is not None:
params['type'] = type
return self._request(url, params=params).json()
@_catch_token_error
def add_example(self, word_id, original, translation,
url='https://api.shanbay.com/bdc/example/'):
"""创建例句"""
data = {
'vocabulary': word_id,
'original': original,
'translation': translation
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_example(self, example_id,
url='https://api.shanbay.com/bdc/learning_example/'):
"""收藏例句"""
data = {
'example_id': example_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_example(self, example_id,
url='https://api.shanbay.com/bdc/example/{example_id}/'):
"""删除例句"""
url = url.format(example_id=example_id)
return self._request(url, method='delete').json()
@_catch_token_error
def notes(self, word_id, url='https://api.shanbay.com/bdc/note/'):
"""获取笔记"""
params = {
'vocabulary_id': word_id
}
return self._request(url, params=params).json()
@_catch_token_error
@_catch_token_error
def favorite_note(self, note_id,
url='https://api.shanbay.com/bdc/learning_note/'):
"""收藏笔记"""
data = {
'note_id': note_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_note(self, note_id,
url='https://api.shanbay.com/bdc/note/{note_id}/'):
"""删除笔记"""
url = url.format(note_id=note_id)
return self._request(url, method='delete').json()
|
mozillazg/python-shanbay | shanbay/api.py | API.favorite_note | python | def favorite_note(self, note_id,
url='https://api.shanbay.com/bdc/learning_note/'):
data = {
'note_id': note_id
}
return self._request(url, method='post', data=data).json() | 收藏笔记 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/api.py#L122-L128 | [
"def _request(self, url, method='get', params=None, data=None):\n logger.debug('method: {0}'.format(method))\n logger.debug('params: {0}'.format(params))\n logger.debug('data: {0}'.format(data))\n kwargs = {}\n if method in ('get', 'delete'):\n kwargs['params'] = params\n elif method in ('p... | class API(object):
def __init__(self, client_id, token):
self.api = OAuth2Session(client_id, token=token)
def _request(self, url, method='get', params=None, data=None):
logger.debug('method: {0}'.format(method))
logger.debug('params: {0}'.format(params))
logger.debug('data: {0}'.format(data))
kwargs = {}
if method in ('get', 'delete'):
kwargs['params'] = params
elif method in ('post',):
kwargs['data'] = data
logger.debug('kwargs: {0}'.format(kwargs))
return getattr(self.api, method)(url, **kwargs)
@_catch_token_error
def user(self, url='https://api.shanbay.com/account/'):
"""获取用户信息"""
return self._request(url).json()
@_catch_token_error
def word(self, word, url='https://api.shanbay.com/bdc/search/'):
"""查询单词"""
params = {
'word': word
}
return self._request(url, params=params).json()
@_catch_token_error
def add_word(self, word_id, url='https://api.shanbay.com/bdc/learning/'):
"""添加单词"""
data = {
'id': word_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def examples(self, word_id, type=None,
url='https://api.shanbay.com/bdc/example/'):
"""获取单词的例句"""
params = {
'vocabulary_id': word_id
}
if type is not None:
params['type'] = type
return self._request(url, params=params).json()
@_catch_token_error
def add_example(self, word_id, original, translation,
url='https://api.shanbay.com/bdc/example/'):
"""创建例句"""
data = {
'vocabulary': word_id,
'original': original,
'translation': translation
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_example(self, example_id,
url='https://api.shanbay.com/bdc/learning_example/'):
"""收藏例句"""
data = {
'example_id': example_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_example(self, example_id,
url='https://api.shanbay.com/bdc/example/{example_id}/'):
"""删除例句"""
url = url.format(example_id=example_id)
return self._request(url, method='delete').json()
@_catch_token_error
def notes(self, word_id, url='https://api.shanbay.com/bdc/note/'):
"""获取笔记"""
params = {
'vocabulary_id': word_id
}
return self._request(url, params=params).json()
@_catch_token_error
def add_note(self, word_id, note,
url='https://api.shanbay.com/bdc/note/'):
"""创建笔记"""
data = {
'vocabulary': word_id,
'note': note
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
@_catch_token_error
def delete_note(self, note_id,
url='https://api.shanbay.com/bdc/note/{note_id}/'):
"""删除笔记"""
url = url.format(note_id=note_id)
return self._request(url, method='delete').json()
|
mozillazg/python-shanbay | shanbay/api.py | API.delete_note | python | def delete_note(self, note_id,
url='https://api.shanbay.com/bdc/note/{note_id}/'):
url = url.format(note_id=note_id)
return self._request(url, method='delete').json() | 删除笔记 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/api.py#L131-L135 | [
"def _request(self, url, method='get', params=None, data=None):\n logger.debug('method: {0}'.format(method))\n logger.debug('params: {0}'.format(params))\n logger.debug('data: {0}'.format(data))\n kwargs = {}\n if method in ('get', 'delete'):\n kwargs['params'] = params\n elif method in ('p... | class API(object):
def __init__(self, client_id, token):
self.api = OAuth2Session(client_id, token=token)
def _request(self, url, method='get', params=None, data=None):
logger.debug('method: {0}'.format(method))
logger.debug('params: {0}'.format(params))
logger.debug('data: {0}'.format(data))
kwargs = {}
if method in ('get', 'delete'):
kwargs['params'] = params
elif method in ('post',):
kwargs['data'] = data
logger.debug('kwargs: {0}'.format(kwargs))
return getattr(self.api, method)(url, **kwargs)
@_catch_token_error
def user(self, url='https://api.shanbay.com/account/'):
"""获取用户信息"""
return self._request(url).json()
@_catch_token_error
def word(self, word, url='https://api.shanbay.com/bdc/search/'):
"""查询单词"""
params = {
'word': word
}
return self._request(url, params=params).json()
@_catch_token_error
def add_word(self, word_id, url='https://api.shanbay.com/bdc/learning/'):
"""添加单词"""
data = {
'id': word_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def examples(self, word_id, type=None,
url='https://api.shanbay.com/bdc/example/'):
"""获取单词的例句"""
params = {
'vocabulary_id': word_id
}
if type is not None:
params['type'] = type
return self._request(url, params=params).json()
@_catch_token_error
def add_example(self, word_id, original, translation,
url='https://api.shanbay.com/bdc/example/'):
"""创建例句"""
data = {
'vocabulary': word_id,
'original': original,
'translation': translation
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_example(self, example_id,
url='https://api.shanbay.com/bdc/learning_example/'):
"""收藏例句"""
data = {
'example_id': example_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def delete_example(self, example_id,
url='https://api.shanbay.com/bdc/example/{example_id}/'):
"""删除例句"""
url = url.format(example_id=example_id)
return self._request(url, method='delete').json()
@_catch_token_error
def notes(self, word_id, url='https://api.shanbay.com/bdc/note/'):
"""获取笔记"""
params = {
'vocabulary_id': word_id
}
return self._request(url, params=params).json()
@_catch_token_error
def add_note(self, word_id, note,
url='https://api.shanbay.com/bdc/note/'):
"""创建笔记"""
data = {
'vocabulary': word_id,
'note': note
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
def favorite_note(self, note_id,
url='https://api.shanbay.com/bdc/learning_note/'):
"""收藏笔记"""
data = {
'note_id': note_id
}
return self._request(url, method='post', data=data).json()
@_catch_token_error
|
mozillazg/python-shanbay | shanbay/team.py | Team.info | python | def info(self):
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
team_header = soup.find_all(class_='team-header')[0]
# 标题
title = team_header.find_all(class_='title')[0].text.strip()
# 组长
leader = team_header.find_all(class_='leader'
)[0].find_all('a')[0].text.strip()
# 创建时间
date_str = team_header.find_all(class_='date')[0].text.strip()
date_created = datetime.datetime.strptime(date_str, '%Y/%m/%d')
# 排名
team_stat = soup.find_all(class_='team-stat')[0]
_str = team_stat.find_all(class_='rank')[0].text.strip()
rank = int(re.findall(r'\d+$', _str)[0])
# 成员数
_str = team_stat.find_all(class_='size')[0].text.strip()
number, max_number = map(int, re.findall(r'(\d+)/(\d+)$', _str)[0])
# 打卡率
_str = team_stat.find_all(class_='rate')[0].text.strip()
rate = float(re.findall(r'(\d+\.?(?:\d+)?)%$', _str)[0])
# 总成长值
_str = team_stat.find_all(class_='points')[0].text.strip()
points = int(re.findall(r'\d+$', _str)[0])
return {
'title': title,
'leader': leader,
'date_created': date_created,
'rank': rank,
'number': number,
'max_number': max_number,
'rate': rate,
'points': points
} | 小组信息
:return: 小组信息
:rtype: dict
返回值示例 ::
{
'title': u'title', # 标题
'leader': u'leader', # 组长
'date_created': datetime.datetime(2013, 10, 6, 0, 0), # 创建日期
'rank': 1000, # 排名
'number': 10, # 当前成员数
'max_number': 20, # 最大成员数
'rate': 1.112, # 打卡率
'points': 23 # 总成长值
} | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/team.py#L44-L98 | null | class Team(object):
"""小组管理
:param shanbay: :class:`~shanbay.Shanbay` 实例对象
:param team_url: 小组首页 URL
::
>>> from shanbay import Shanbay, Team
>>> s = Shanbay('username', 'password')
>>> s.login()
>>> t = Team(s, 'http://www.shanbay.com/team/1234/')
"""
def __init__(self, shanbay, team_url):
self.shanbay = shanbay
self._request = shanbay._request
self.request = shanbay.request
self.team_url = team_url
self.team_id = self.get_url_id(team_url)
self.dismiss_url = 'http://www.shanbay.com/team/manage/'
def get_url_id(self, url):
return re.findall(r'/(\d+)/?$', url)[0]
def update_limit(self, days, kind=2, condition='>='):
"""更新成员加入条件
:rtype: bool
"""
url = 'http://www.shanbay.com/team/setqualification/%s' % self.team_id
data = {
'kind': kind,
'condition': condition,
'value': days,
'team': self.team_id,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
r = self.request(url, 'post', data=data)
return r.url == 'http://www.shanbay.com/referral/invite/?kind=team'
def members(self):
"""获取小组所有成员的信息列表"""
all_members = []
for page in range(1, self.max_page() + 1):
all_members.extend(self.single_page_members(page))
return all_members
def max_page(self):
"""获取小组成员管理页面的最大页数"""
html = self.request(self.dismiss_url).text
soup = BeautifulSoup(html)
# 分页所在 div
try:
pagination = soup.find_all(class_='pagination')[0]
except IndexError as e:
logger.exception(e)
return 1
pages = pagination.find_all('li')
return int(pages[-2].text) if pages else 1
def single_page_members(self, page_number=1):
"""获取单个页面内的小组成员信息
:param page_number: 页码
:return: 包含小组成员信息的列表
返回值示例: ::
[{
'id': 123, # member_id
'username': 'jim', # username
'nickname': 'Jim', # 昵称
'role': u'小组长', # 身份
'points': 1234, # 贡献成长值
'days': 100, # 组龄
'rate': 99.9, # 打卡率
'checked_yesterday': True, # 昨天是否打卡
'checked': False, # 今天是否打卡
}, {
# ...
}]
"""
url = '%s?page=%s' % (self.dismiss_url, page_number)
html = self.request(url).text
soup = BeautifulSoup(html)
members_html = soup.find(id='members')
if not members_html:
return []
def get_tag_string(html, class_, tag='td', n=0):
"""获取单个 tag 的文本数据"""
return html.find_all(tag, class_=class_)[n].get_text().strip()
members = []
# 获取成员信息
for member_html in members_html.find_all('tr', class_='member'):
_id = member_html.attrs['data-id']
try:
user_url = member_html.find_all('td', class_='user'
)[0].find('a').attrs['href']
username = self.get_username('http://www.shanbay.com'
+ user_url)
except Exception as e:
logger.exception(e)
username = ''
try:
nickname = get_tag_string(member_html, 'nickname', 'a')
except Exception as e:
logger.exception(e)
nickname = username
try:
role = member_html.find_all('td', class_='user'
)[0].find_all('span', class_='label'
)[0].get_text().strip()
except IndexError:
role = ''
except Exception as e:
logger.exception(e)
role = ''
member = {
'id': int(_id),
'username': username,
# 昵称
'nickname': nickname,
# 身份
'role': role,
# 贡献成长值
'points': int(get_tag_string(member_html, 'points')),
# 组龄
'days': int(get_tag_string(member_html, 'days')),
# 打卡率
'rate': float(get_tag_string(member_html, 'rate'
).split('%')[0]),
# 昨天是否打卡
'checked_yesterday': get_tag_string(member_html, 'checked'
) != '未打卡',
# 今天是否打卡
'checked': get_tag_string(member_html, 'checked',
n=1) != '未打卡',
}
members.append(member)
return members
def get_username(self, url):
html = self.request(url).text
soup = BeautifulSoup(html)
t = soup.find_all(class_='page-header')[0].find_all('h2')[0].text.strip()
return t.strip(u'的日记').strip()
def dismiss(self, member_ids):
"""踢人. 注意别把自己给踢了.
:param member_ids: 组员 ids
:return: bool
"""
url = 'http://www.shanbay.com/api/v1/team/member/'
data = {
'action': 'dispel',
}
if isinstance(member_ids, (list, tuple)):
data['ids'] = ','.join(map(str, member_ids))
else:
data['ids'] = member_ids
r = self.request(url, 'put', data=data)
try:
return r.json()['msg'] == "SUCCESS"
except Exception as e:
logger.exception(e)
return False
def forum_id(self):
"""小组发帖要用的 forum_id"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
return soup.find(id='forum_id').attrs['value']
def new_topic(self, title, content):
"""小组发贴
:return: 帖子 id 或 ``None``
"""
data = {
'title': title,
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/%s/thread/' % self.forum_id()
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
def reply_topic(self, topic_id, content):
"""小组回帖
:return: 帖子 id 或 ``None``
"""
data = {
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/thread/%s/post/' % topic_id
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
|
mozillazg/python-shanbay | shanbay/team.py | Team.update_limit | python | def update_limit(self, days, kind=2, condition='>='):
url = 'http://www.shanbay.com/team/setqualification/%s' % self.team_id
data = {
'kind': kind,
'condition': condition,
'value': days,
'team': self.team_id,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
r = self.request(url, 'post', data=data)
return r.url == 'http://www.shanbay.com/referral/invite/?kind=team' | 更新成员加入条件
:rtype: bool | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/team.py#L100-L114 | null | class Team(object):
"""小组管理
:param shanbay: :class:`~shanbay.Shanbay` 实例对象
:param team_url: 小组首页 URL
::
>>> from shanbay import Shanbay, Team
>>> s = Shanbay('username', 'password')
>>> s.login()
>>> t = Team(s, 'http://www.shanbay.com/team/1234/')
"""
def __init__(self, shanbay, team_url):
self.shanbay = shanbay
self._request = shanbay._request
self.request = shanbay.request
self.team_url = team_url
self.team_id = self.get_url_id(team_url)
self.dismiss_url = 'http://www.shanbay.com/team/manage/'
def get_url_id(self, url):
return re.findall(r'/(\d+)/?$', url)[0]
def info(self):
"""小组信息
:return: 小组信息
:rtype: dict
返回值示例 ::
{
'title': u'title', # 标题
'leader': u'leader', # 组长
'date_created': datetime.datetime(2013, 10, 6, 0, 0), # 创建日期
'rank': 1000, # 排名
'number': 10, # 当前成员数
'max_number': 20, # 最大成员数
'rate': 1.112, # 打卡率
'points': 23 # 总成长值
}
"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
team_header = soup.find_all(class_='team-header')[0]
# 标题
title = team_header.find_all(class_='title')[0].text.strip()
# 组长
leader = team_header.find_all(class_='leader'
)[0].find_all('a')[0].text.strip()
# 创建时间
date_str = team_header.find_all(class_='date')[0].text.strip()
date_created = datetime.datetime.strptime(date_str, '%Y/%m/%d')
# 排名
team_stat = soup.find_all(class_='team-stat')[0]
_str = team_stat.find_all(class_='rank')[0].text.strip()
rank = int(re.findall(r'\d+$', _str)[0])
# 成员数
_str = team_stat.find_all(class_='size')[0].text.strip()
number, max_number = map(int, re.findall(r'(\d+)/(\d+)$', _str)[0])
# 打卡率
_str = team_stat.find_all(class_='rate')[0].text.strip()
rate = float(re.findall(r'(\d+\.?(?:\d+)?)%$', _str)[0])
# 总成长值
_str = team_stat.find_all(class_='points')[0].text.strip()
points = int(re.findall(r'\d+$', _str)[0])
return {
'title': title,
'leader': leader,
'date_created': date_created,
'rank': rank,
'number': number,
'max_number': max_number,
'rate': rate,
'points': points
}
def members(self):
"""获取小组所有成员的信息列表"""
all_members = []
for page in range(1, self.max_page() + 1):
all_members.extend(self.single_page_members(page))
return all_members
def max_page(self):
"""获取小组成员管理页面的最大页数"""
html = self.request(self.dismiss_url).text
soup = BeautifulSoup(html)
# 分页所在 div
try:
pagination = soup.find_all(class_='pagination')[0]
except IndexError as e:
logger.exception(e)
return 1
pages = pagination.find_all('li')
return int(pages[-2].text) if pages else 1
def single_page_members(self, page_number=1):
"""获取单个页面内的小组成员信息
:param page_number: 页码
:return: 包含小组成员信息的列表
返回值示例: ::
[{
'id': 123, # member_id
'username': 'jim', # username
'nickname': 'Jim', # 昵称
'role': u'小组长', # 身份
'points': 1234, # 贡献成长值
'days': 100, # 组龄
'rate': 99.9, # 打卡率
'checked_yesterday': True, # 昨天是否打卡
'checked': False, # 今天是否打卡
}, {
# ...
}]
"""
url = '%s?page=%s' % (self.dismiss_url, page_number)
html = self.request(url).text
soup = BeautifulSoup(html)
members_html = soup.find(id='members')
if not members_html:
return []
def get_tag_string(html, class_, tag='td', n=0):
"""获取单个 tag 的文本数据"""
return html.find_all(tag, class_=class_)[n].get_text().strip()
members = []
# 获取成员信息
for member_html in members_html.find_all('tr', class_='member'):
_id = member_html.attrs['data-id']
try:
user_url = member_html.find_all('td', class_='user'
)[0].find('a').attrs['href']
username = self.get_username('http://www.shanbay.com'
+ user_url)
except Exception as e:
logger.exception(e)
username = ''
try:
nickname = get_tag_string(member_html, 'nickname', 'a')
except Exception as e:
logger.exception(e)
nickname = username
try:
role = member_html.find_all('td', class_='user'
)[0].find_all('span', class_='label'
)[0].get_text().strip()
except IndexError:
role = ''
except Exception as e:
logger.exception(e)
role = ''
member = {
'id': int(_id),
'username': username,
# 昵称
'nickname': nickname,
# 身份
'role': role,
# 贡献成长值
'points': int(get_tag_string(member_html, 'points')),
# 组龄
'days': int(get_tag_string(member_html, 'days')),
# 打卡率
'rate': float(get_tag_string(member_html, 'rate'
).split('%')[0]),
# 昨天是否打卡
'checked_yesterday': get_tag_string(member_html, 'checked'
) != '未打卡',
# 今天是否打卡
'checked': get_tag_string(member_html, 'checked',
n=1) != '未打卡',
}
members.append(member)
return members
def get_username(self, url):
html = self.request(url).text
soup = BeautifulSoup(html)
t = soup.find_all(class_='page-header')[0].find_all('h2')[0].text.strip()
return t.strip(u'的日记').strip()
def dismiss(self, member_ids):
"""踢人. 注意别把自己给踢了.
:param member_ids: 组员 ids
:return: bool
"""
url = 'http://www.shanbay.com/api/v1/team/member/'
data = {
'action': 'dispel',
}
if isinstance(member_ids, (list, tuple)):
data['ids'] = ','.join(map(str, member_ids))
else:
data['ids'] = member_ids
r = self.request(url, 'put', data=data)
try:
return r.json()['msg'] == "SUCCESS"
except Exception as e:
logger.exception(e)
return False
def forum_id(self):
"""小组发帖要用的 forum_id"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
return soup.find(id='forum_id').attrs['value']
def new_topic(self, title, content):
"""小组发贴
:return: 帖子 id 或 ``None``
"""
data = {
'title': title,
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/%s/thread/' % self.forum_id()
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
def reply_topic(self, topic_id, content):
"""小组回帖
:return: 帖子 id 或 ``None``
"""
data = {
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/thread/%s/post/' % topic_id
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
|
mozillazg/python-shanbay | shanbay/team.py | Team.members | python | def members(self):
all_members = []
for page in range(1, self.max_page() + 1):
all_members.extend(self.single_page_members(page))
return all_members | 获取小组所有成员的信息列表 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/team.py#L116-L121 | [
"def max_page(self):\n \"\"\"获取小组成员管理页面的最大页数\"\"\"\n html = self.request(self.dismiss_url).text\n soup = BeautifulSoup(html)\n # 分页所在 div\n try:\n pagination = soup.find_all(class_='pagination')[0]\n except IndexError as e:\n logger.exception(e)\n return 1\n pages = paginat... | class Team(object):
"""小组管理
:param shanbay: :class:`~shanbay.Shanbay` 实例对象
:param team_url: 小组首页 URL
::
>>> from shanbay import Shanbay, Team
>>> s = Shanbay('username', 'password')
>>> s.login()
>>> t = Team(s, 'http://www.shanbay.com/team/1234/')
"""
def __init__(self, shanbay, team_url):
self.shanbay = shanbay
self._request = shanbay._request
self.request = shanbay.request
self.team_url = team_url
self.team_id = self.get_url_id(team_url)
self.dismiss_url = 'http://www.shanbay.com/team/manage/'
def get_url_id(self, url):
return re.findall(r'/(\d+)/?$', url)[0]
def info(self):
"""小组信息
:return: 小组信息
:rtype: dict
返回值示例 ::
{
'title': u'title', # 标题
'leader': u'leader', # 组长
'date_created': datetime.datetime(2013, 10, 6, 0, 0), # 创建日期
'rank': 1000, # 排名
'number': 10, # 当前成员数
'max_number': 20, # 最大成员数
'rate': 1.112, # 打卡率
'points': 23 # 总成长值
}
"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
team_header = soup.find_all(class_='team-header')[0]
# 标题
title = team_header.find_all(class_='title')[0].text.strip()
# 组长
leader = team_header.find_all(class_='leader'
)[0].find_all('a')[0].text.strip()
# 创建时间
date_str = team_header.find_all(class_='date')[0].text.strip()
date_created = datetime.datetime.strptime(date_str, '%Y/%m/%d')
# 排名
team_stat = soup.find_all(class_='team-stat')[0]
_str = team_stat.find_all(class_='rank')[0].text.strip()
rank = int(re.findall(r'\d+$', _str)[0])
# 成员数
_str = team_stat.find_all(class_='size')[0].text.strip()
number, max_number = map(int, re.findall(r'(\d+)/(\d+)$', _str)[0])
# 打卡率
_str = team_stat.find_all(class_='rate')[0].text.strip()
rate = float(re.findall(r'(\d+\.?(?:\d+)?)%$', _str)[0])
# 总成长值
_str = team_stat.find_all(class_='points')[0].text.strip()
points = int(re.findall(r'\d+$', _str)[0])
return {
'title': title,
'leader': leader,
'date_created': date_created,
'rank': rank,
'number': number,
'max_number': max_number,
'rate': rate,
'points': points
}
def update_limit(self, days, kind=2, condition='>='):
"""更新成员加入条件
:rtype: bool
"""
url = 'http://www.shanbay.com/team/setqualification/%s' % self.team_id
data = {
'kind': kind,
'condition': condition,
'value': days,
'team': self.team_id,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
r = self.request(url, 'post', data=data)
return r.url == 'http://www.shanbay.com/referral/invite/?kind=team'
def max_page(self):
"""获取小组成员管理页面的最大页数"""
html = self.request(self.dismiss_url).text
soup = BeautifulSoup(html)
# 分页所在 div
try:
pagination = soup.find_all(class_='pagination')[0]
except IndexError as e:
logger.exception(e)
return 1
pages = pagination.find_all('li')
return int(pages[-2].text) if pages else 1
def single_page_members(self, page_number=1):
"""获取单个页面内的小组成员信息
:param page_number: 页码
:return: 包含小组成员信息的列表
返回值示例: ::
[{
'id': 123, # member_id
'username': 'jim', # username
'nickname': 'Jim', # 昵称
'role': u'小组长', # 身份
'points': 1234, # 贡献成长值
'days': 100, # 组龄
'rate': 99.9, # 打卡率
'checked_yesterday': True, # 昨天是否打卡
'checked': False, # 今天是否打卡
}, {
# ...
}]
"""
url = '%s?page=%s' % (self.dismiss_url, page_number)
html = self.request(url).text
soup = BeautifulSoup(html)
members_html = soup.find(id='members')
if not members_html:
return []
def get_tag_string(html, class_, tag='td', n=0):
"""获取单个 tag 的文本数据"""
return html.find_all(tag, class_=class_)[n].get_text().strip()
members = []
# 获取成员信息
for member_html in members_html.find_all('tr', class_='member'):
_id = member_html.attrs['data-id']
try:
user_url = member_html.find_all('td', class_='user'
)[0].find('a').attrs['href']
username = self.get_username('http://www.shanbay.com'
+ user_url)
except Exception as e:
logger.exception(e)
username = ''
try:
nickname = get_tag_string(member_html, 'nickname', 'a')
except Exception as e:
logger.exception(e)
nickname = username
try:
role = member_html.find_all('td', class_='user'
)[0].find_all('span', class_='label'
)[0].get_text().strip()
except IndexError:
role = ''
except Exception as e:
logger.exception(e)
role = ''
member = {
'id': int(_id),
'username': username,
# 昵称
'nickname': nickname,
# 身份
'role': role,
# 贡献成长值
'points': int(get_tag_string(member_html, 'points')),
# 组龄
'days': int(get_tag_string(member_html, 'days')),
# 打卡率
'rate': float(get_tag_string(member_html, 'rate'
).split('%')[0]),
# 昨天是否打卡
'checked_yesterday': get_tag_string(member_html, 'checked'
) != '未打卡',
# 今天是否打卡
'checked': get_tag_string(member_html, 'checked',
n=1) != '未打卡',
}
members.append(member)
return members
def get_username(self, url):
html = self.request(url).text
soup = BeautifulSoup(html)
t = soup.find_all(class_='page-header')[0].find_all('h2')[0].text.strip()
return t.strip(u'的日记').strip()
def dismiss(self, member_ids):
"""踢人. 注意别把自己给踢了.
:param member_ids: 组员 ids
:return: bool
"""
url = 'http://www.shanbay.com/api/v1/team/member/'
data = {
'action': 'dispel',
}
if isinstance(member_ids, (list, tuple)):
data['ids'] = ','.join(map(str, member_ids))
else:
data['ids'] = member_ids
r = self.request(url, 'put', data=data)
try:
return r.json()['msg'] == "SUCCESS"
except Exception as e:
logger.exception(e)
return False
def forum_id(self):
"""小组发帖要用的 forum_id"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
return soup.find(id='forum_id').attrs['value']
def new_topic(self, title, content):
"""小组发贴
:return: 帖子 id 或 ``None``
"""
data = {
'title': title,
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/%s/thread/' % self.forum_id()
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
def reply_topic(self, topic_id, content):
"""小组回帖
:return: 帖子 id 或 ``None``
"""
data = {
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/thread/%s/post/' % topic_id
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
|
mozillazg/python-shanbay | shanbay/team.py | Team.max_page | python | def max_page(self):
html = self.request(self.dismiss_url).text
soup = BeautifulSoup(html)
# 分页所在 div
try:
pagination = soup.find_all(class_='pagination')[0]
except IndexError as e:
logger.exception(e)
return 1
pages = pagination.find_all('li')
return int(pages[-2].text) if pages else 1 | 获取小组成员管理页面的最大页数 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/team.py#L123-L134 | null | class Team(object):
"""小组管理
:param shanbay: :class:`~shanbay.Shanbay` 实例对象
:param team_url: 小组首页 URL
::
>>> from shanbay import Shanbay, Team
>>> s = Shanbay('username', 'password')
>>> s.login()
>>> t = Team(s, 'http://www.shanbay.com/team/1234/')
"""
def __init__(self, shanbay, team_url):
self.shanbay = shanbay
self._request = shanbay._request
self.request = shanbay.request
self.team_url = team_url
self.team_id = self.get_url_id(team_url)
self.dismiss_url = 'http://www.shanbay.com/team/manage/'
def get_url_id(self, url):
return re.findall(r'/(\d+)/?$', url)[0]
def info(self):
"""小组信息
:return: 小组信息
:rtype: dict
返回值示例 ::
{
'title': u'title', # 标题
'leader': u'leader', # 组长
'date_created': datetime.datetime(2013, 10, 6, 0, 0), # 创建日期
'rank': 1000, # 排名
'number': 10, # 当前成员数
'max_number': 20, # 最大成员数
'rate': 1.112, # 打卡率
'points': 23 # 总成长值
}
"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
team_header = soup.find_all(class_='team-header')[0]
# 标题
title = team_header.find_all(class_='title')[0].text.strip()
# 组长
leader = team_header.find_all(class_='leader'
)[0].find_all('a')[0].text.strip()
# 创建时间
date_str = team_header.find_all(class_='date')[0].text.strip()
date_created = datetime.datetime.strptime(date_str, '%Y/%m/%d')
# 排名
team_stat = soup.find_all(class_='team-stat')[0]
_str = team_stat.find_all(class_='rank')[0].text.strip()
rank = int(re.findall(r'\d+$', _str)[0])
# 成员数
_str = team_stat.find_all(class_='size')[0].text.strip()
number, max_number = map(int, re.findall(r'(\d+)/(\d+)$', _str)[0])
# 打卡率
_str = team_stat.find_all(class_='rate')[0].text.strip()
rate = float(re.findall(r'(\d+\.?(?:\d+)?)%$', _str)[0])
# 总成长值
_str = team_stat.find_all(class_='points')[0].text.strip()
points = int(re.findall(r'\d+$', _str)[0])
return {
'title': title,
'leader': leader,
'date_created': date_created,
'rank': rank,
'number': number,
'max_number': max_number,
'rate': rate,
'points': points
}
def update_limit(self, days, kind=2, condition='>='):
"""更新成员加入条件
:rtype: bool
"""
url = 'http://www.shanbay.com/team/setqualification/%s' % self.team_id
data = {
'kind': kind,
'condition': condition,
'value': days,
'team': self.team_id,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
r = self.request(url, 'post', data=data)
return r.url == 'http://www.shanbay.com/referral/invite/?kind=team'
def members(self):
"""获取小组所有成员的信息列表"""
all_members = []
for page in range(1, self.max_page() + 1):
all_members.extend(self.single_page_members(page))
return all_members
def single_page_members(self, page_number=1):
"""获取单个页面内的小组成员信息
:param page_number: 页码
:return: 包含小组成员信息的列表
返回值示例: ::
[{
'id': 123, # member_id
'username': 'jim', # username
'nickname': 'Jim', # 昵称
'role': u'小组长', # 身份
'points': 1234, # 贡献成长值
'days': 100, # 组龄
'rate': 99.9, # 打卡率
'checked_yesterday': True, # 昨天是否打卡
'checked': False, # 今天是否打卡
}, {
# ...
}]
"""
url = '%s?page=%s' % (self.dismiss_url, page_number)
html = self.request(url).text
soup = BeautifulSoup(html)
members_html = soup.find(id='members')
if not members_html:
return []
def get_tag_string(html, class_, tag='td', n=0):
"""获取单个 tag 的文本数据"""
return html.find_all(tag, class_=class_)[n].get_text().strip()
members = []
# 获取成员信息
for member_html in members_html.find_all('tr', class_='member'):
_id = member_html.attrs['data-id']
try:
user_url = member_html.find_all('td', class_='user'
)[0].find('a').attrs['href']
username = self.get_username('http://www.shanbay.com'
+ user_url)
except Exception as e:
logger.exception(e)
username = ''
try:
nickname = get_tag_string(member_html, 'nickname', 'a')
except Exception as e:
logger.exception(e)
nickname = username
try:
role = member_html.find_all('td', class_='user'
)[0].find_all('span', class_='label'
)[0].get_text().strip()
except IndexError:
role = ''
except Exception as e:
logger.exception(e)
role = ''
member = {
'id': int(_id),
'username': username,
# 昵称
'nickname': nickname,
# 身份
'role': role,
# 贡献成长值
'points': int(get_tag_string(member_html, 'points')),
# 组龄
'days': int(get_tag_string(member_html, 'days')),
# 打卡率
'rate': float(get_tag_string(member_html, 'rate'
).split('%')[0]),
# 昨天是否打卡
'checked_yesterday': get_tag_string(member_html, 'checked'
) != '未打卡',
# 今天是否打卡
'checked': get_tag_string(member_html, 'checked',
n=1) != '未打卡',
}
members.append(member)
return members
def get_username(self, url):
html = self.request(url).text
soup = BeautifulSoup(html)
t = soup.find_all(class_='page-header')[0].find_all('h2')[0].text.strip()
return t.strip(u'的日记').strip()
def dismiss(self, member_ids):
"""踢人. 注意别把自己给踢了.
:param member_ids: 组员 ids
:return: bool
"""
url = 'http://www.shanbay.com/api/v1/team/member/'
data = {
'action': 'dispel',
}
if isinstance(member_ids, (list, tuple)):
data['ids'] = ','.join(map(str, member_ids))
else:
data['ids'] = member_ids
r = self.request(url, 'put', data=data)
try:
return r.json()['msg'] == "SUCCESS"
except Exception as e:
logger.exception(e)
return False
def forum_id(self):
"""小组发帖要用的 forum_id"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
return soup.find(id='forum_id').attrs['value']
def new_topic(self, title, content):
"""小组发贴
:return: 帖子 id 或 ``None``
"""
data = {
'title': title,
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/%s/thread/' % self.forum_id()
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
def reply_topic(self, topic_id, content):
"""小组回帖
:return: 帖子 id 或 ``None``
"""
data = {
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/thread/%s/post/' % topic_id
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
|
mozillazg/python-shanbay | shanbay/team.py | Team.single_page_members | python | def single_page_members(self, page_number=1):
url = '%s?page=%s' % (self.dismiss_url, page_number)
html = self.request(url).text
soup = BeautifulSoup(html)
members_html = soup.find(id='members')
if not members_html:
return []
def get_tag_string(html, class_, tag='td', n=0):
"""获取单个 tag 的文本数据"""
return html.find_all(tag, class_=class_)[n].get_text().strip()
members = []
# 获取成员信息
for member_html in members_html.find_all('tr', class_='member'):
_id = member_html.attrs['data-id']
try:
user_url = member_html.find_all('td', class_='user'
)[0].find('a').attrs['href']
username = self.get_username('http://www.shanbay.com'
+ user_url)
except Exception as e:
logger.exception(e)
username = ''
try:
nickname = get_tag_string(member_html, 'nickname', 'a')
except Exception as e:
logger.exception(e)
nickname = username
try:
role = member_html.find_all('td', class_='user'
)[0].find_all('span', class_='label'
)[0].get_text().strip()
except IndexError:
role = ''
except Exception as e:
logger.exception(e)
role = ''
member = {
'id': int(_id),
'username': username,
# 昵称
'nickname': nickname,
# 身份
'role': role,
# 贡献成长值
'points': int(get_tag_string(member_html, 'points')),
# 组龄
'days': int(get_tag_string(member_html, 'days')),
# 打卡率
'rate': float(get_tag_string(member_html, 'rate'
).split('%')[0]),
# 昨天是否打卡
'checked_yesterday': get_tag_string(member_html, 'checked'
) != '未打卡',
# 今天是否打卡
'checked': get_tag_string(member_html, 'checked',
n=1) != '未打卡',
}
members.append(member)
return members | 获取单个页面内的小组成员信息
:param page_number: 页码
:return: 包含小组成员信息的列表
返回值示例: ::
[{
'id': 123, # member_id
'username': 'jim', # username
'nickname': 'Jim', # 昵称
'role': u'小组长', # 身份
'points': 1234, # 贡献成长值
'days': 100, # 组龄
'rate': 99.9, # 打卡率
'checked_yesterday': True, # 昨天是否打卡
'checked': False, # 今天是否打卡
}, {
# ...
}] | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/team.py#L136-L220 | [
"def get_username(self, url):\n html = self.request(url).text\n soup = BeautifulSoup(html)\n t = soup.find_all(class_='page-header')[0].find_all('h2')[0].text.strip()\n return t.strip(u'的日记').strip()\n",
"def get_tag_string(html, class_, tag='td', n=0):\n \"\"\"获取单个 tag 的文本数据\"\"\"\n return html... | class Team(object):
"""小组管理
:param shanbay: :class:`~shanbay.Shanbay` 实例对象
:param team_url: 小组首页 URL
::
>>> from shanbay import Shanbay, Team
>>> s = Shanbay('username', 'password')
>>> s.login()
>>> t = Team(s, 'http://www.shanbay.com/team/1234/')
"""
def __init__(self, shanbay, team_url):
self.shanbay = shanbay
self._request = shanbay._request
self.request = shanbay.request
self.team_url = team_url
self.team_id = self.get_url_id(team_url)
self.dismiss_url = 'http://www.shanbay.com/team/manage/'
def get_url_id(self, url):
return re.findall(r'/(\d+)/?$', url)[0]
def info(self):
"""小组信息
:return: 小组信息
:rtype: dict
返回值示例 ::
{
'title': u'title', # 标题
'leader': u'leader', # 组长
'date_created': datetime.datetime(2013, 10, 6, 0, 0), # 创建日期
'rank': 1000, # 排名
'number': 10, # 当前成员数
'max_number': 20, # 最大成员数
'rate': 1.112, # 打卡率
'points': 23 # 总成长值
}
"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
team_header = soup.find_all(class_='team-header')[0]
# 标题
title = team_header.find_all(class_='title')[0].text.strip()
# 组长
leader = team_header.find_all(class_='leader'
)[0].find_all('a')[0].text.strip()
# 创建时间
date_str = team_header.find_all(class_='date')[0].text.strip()
date_created = datetime.datetime.strptime(date_str, '%Y/%m/%d')
# 排名
team_stat = soup.find_all(class_='team-stat')[0]
_str = team_stat.find_all(class_='rank')[0].text.strip()
rank = int(re.findall(r'\d+$', _str)[0])
# 成员数
_str = team_stat.find_all(class_='size')[0].text.strip()
number, max_number = map(int, re.findall(r'(\d+)/(\d+)$', _str)[0])
# 打卡率
_str = team_stat.find_all(class_='rate')[0].text.strip()
rate = float(re.findall(r'(\d+\.?(?:\d+)?)%$', _str)[0])
# 总成长值
_str = team_stat.find_all(class_='points')[0].text.strip()
points = int(re.findall(r'\d+$', _str)[0])
return {
'title': title,
'leader': leader,
'date_created': date_created,
'rank': rank,
'number': number,
'max_number': max_number,
'rate': rate,
'points': points
}
def update_limit(self, days, kind=2, condition='>='):
"""更新成员加入条件
:rtype: bool
"""
url = 'http://www.shanbay.com/team/setqualification/%s' % self.team_id
data = {
'kind': kind,
'condition': condition,
'value': days,
'team': self.team_id,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
r = self.request(url, 'post', data=data)
return r.url == 'http://www.shanbay.com/referral/invite/?kind=team'
def members(self):
"""获取小组所有成员的信息列表"""
all_members = []
for page in range(1, self.max_page() + 1):
all_members.extend(self.single_page_members(page))
return all_members
def max_page(self):
"""获取小组成员管理页面的最大页数"""
html = self.request(self.dismiss_url).text
soup = BeautifulSoup(html)
# 分页所在 div
try:
pagination = soup.find_all(class_='pagination')[0]
except IndexError as e:
logger.exception(e)
return 1
pages = pagination.find_all('li')
return int(pages[-2].text) if pages else 1
def get_username(self, url):
html = self.request(url).text
soup = BeautifulSoup(html)
t = soup.find_all(class_='page-header')[0].find_all('h2')[0].text.strip()
return t.strip(u'的日记').strip()
def dismiss(self, member_ids):
"""踢人. 注意别把自己给踢了.
:param member_ids: 组员 ids
:return: bool
"""
url = 'http://www.shanbay.com/api/v1/team/member/'
data = {
'action': 'dispel',
}
if isinstance(member_ids, (list, tuple)):
data['ids'] = ','.join(map(str, member_ids))
else:
data['ids'] = member_ids
r = self.request(url, 'put', data=data)
try:
return r.json()['msg'] == "SUCCESS"
except Exception as e:
logger.exception(e)
return False
def forum_id(self):
"""小组发帖要用的 forum_id"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
return soup.find(id='forum_id').attrs['value']
def new_topic(self, title, content):
"""小组发贴
:return: 帖子 id 或 ``None``
"""
data = {
'title': title,
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/%s/thread/' % self.forum_id()
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
def reply_topic(self, topic_id, content):
"""小组回帖
:return: 帖子 id 或 ``None``
"""
data = {
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/thread/%s/post/' % topic_id
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
|
mozillazg/python-shanbay | shanbay/team.py | Team.dismiss | python | def dismiss(self, member_ids):
url = 'http://www.shanbay.com/api/v1/team/member/'
data = {
'action': 'dispel',
}
if isinstance(member_ids, (list, tuple)):
data['ids'] = ','.join(map(str, member_ids))
else:
data['ids'] = member_ids
r = self.request(url, 'put', data=data)
try:
return r.json()['msg'] == "SUCCESS"
except Exception as e:
logger.exception(e)
return False | 踢人. 注意别把自己给踢了.
:param member_ids: 组员 ids
:return: bool | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/team.py#L228-L247 | null | class Team(object):
"""小组管理
:param shanbay: :class:`~shanbay.Shanbay` 实例对象
:param team_url: 小组首页 URL
::
>>> from shanbay import Shanbay, Team
>>> s = Shanbay('username', 'password')
>>> s.login()
>>> t = Team(s, 'http://www.shanbay.com/team/1234/')
"""
def __init__(self, shanbay, team_url):
self.shanbay = shanbay
self._request = shanbay._request
self.request = shanbay.request
self.team_url = team_url
self.team_id = self.get_url_id(team_url)
self.dismiss_url = 'http://www.shanbay.com/team/manage/'
def get_url_id(self, url):
return re.findall(r'/(\d+)/?$', url)[0]
def info(self):
"""小组信息
:return: 小组信息
:rtype: dict
返回值示例 ::
{
'title': u'title', # 标题
'leader': u'leader', # 组长
'date_created': datetime.datetime(2013, 10, 6, 0, 0), # 创建日期
'rank': 1000, # 排名
'number': 10, # 当前成员数
'max_number': 20, # 最大成员数
'rate': 1.112, # 打卡率
'points': 23 # 总成长值
}
"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
team_header = soup.find_all(class_='team-header')[0]
# 标题
title = team_header.find_all(class_='title')[0].text.strip()
# 组长
leader = team_header.find_all(class_='leader'
)[0].find_all('a')[0].text.strip()
# 创建时间
date_str = team_header.find_all(class_='date')[0].text.strip()
date_created = datetime.datetime.strptime(date_str, '%Y/%m/%d')
# 排名
team_stat = soup.find_all(class_='team-stat')[0]
_str = team_stat.find_all(class_='rank')[0].text.strip()
rank = int(re.findall(r'\d+$', _str)[0])
# 成员数
_str = team_stat.find_all(class_='size')[0].text.strip()
number, max_number = map(int, re.findall(r'(\d+)/(\d+)$', _str)[0])
# 打卡率
_str = team_stat.find_all(class_='rate')[0].text.strip()
rate = float(re.findall(r'(\d+\.?(?:\d+)?)%$', _str)[0])
# 总成长值
_str = team_stat.find_all(class_='points')[0].text.strip()
points = int(re.findall(r'\d+$', _str)[0])
return {
'title': title,
'leader': leader,
'date_created': date_created,
'rank': rank,
'number': number,
'max_number': max_number,
'rate': rate,
'points': points
}
def update_limit(self, days, kind=2, condition='>='):
"""更新成员加入条件
:rtype: bool
"""
url = 'http://www.shanbay.com/team/setqualification/%s' % self.team_id
data = {
'kind': kind,
'condition': condition,
'value': days,
'team': self.team_id,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
r = self.request(url, 'post', data=data)
return r.url == 'http://www.shanbay.com/referral/invite/?kind=team'
def members(self):
"""获取小组所有成员的信息列表"""
all_members = []
for page in range(1, self.max_page() + 1):
all_members.extend(self.single_page_members(page))
return all_members
def max_page(self):
"""获取小组成员管理页面的最大页数"""
html = self.request(self.dismiss_url).text
soup = BeautifulSoup(html)
# 分页所在 div
try:
pagination = soup.find_all(class_='pagination')[0]
except IndexError as e:
logger.exception(e)
return 1
pages = pagination.find_all('li')
return int(pages[-2].text) if pages else 1
def single_page_members(self, page_number=1):
"""获取单个页面内的小组成员信息
:param page_number: 页码
:return: 包含小组成员信息的列表
返回值示例: ::
[{
'id': 123, # member_id
'username': 'jim', # username
'nickname': 'Jim', # 昵称
'role': u'小组长', # 身份
'points': 1234, # 贡献成长值
'days': 100, # 组龄
'rate': 99.9, # 打卡率
'checked_yesterday': True, # 昨天是否打卡
'checked': False, # 今天是否打卡
}, {
# ...
}]
"""
url = '%s?page=%s' % (self.dismiss_url, page_number)
html = self.request(url).text
soup = BeautifulSoup(html)
members_html = soup.find(id='members')
if not members_html:
return []
def get_tag_string(html, class_, tag='td', n=0):
"""获取单个 tag 的文本数据"""
return html.find_all(tag, class_=class_)[n].get_text().strip()
members = []
# 获取成员信息
for member_html in members_html.find_all('tr', class_='member'):
_id = member_html.attrs['data-id']
try:
user_url = member_html.find_all('td', class_='user'
)[0].find('a').attrs['href']
username = self.get_username('http://www.shanbay.com'
+ user_url)
except Exception as e:
logger.exception(e)
username = ''
try:
nickname = get_tag_string(member_html, 'nickname', 'a')
except Exception as e:
logger.exception(e)
nickname = username
try:
role = member_html.find_all('td', class_='user'
)[0].find_all('span', class_='label'
)[0].get_text().strip()
except IndexError:
role = ''
except Exception as e:
logger.exception(e)
role = ''
member = {
'id': int(_id),
'username': username,
# 昵称
'nickname': nickname,
# 身份
'role': role,
# 贡献成长值
'points': int(get_tag_string(member_html, 'points')),
# 组龄
'days': int(get_tag_string(member_html, 'days')),
# 打卡率
'rate': float(get_tag_string(member_html, 'rate'
).split('%')[0]),
# 昨天是否打卡
'checked_yesterday': get_tag_string(member_html, 'checked'
) != '未打卡',
# 今天是否打卡
'checked': get_tag_string(member_html, 'checked',
n=1) != '未打卡',
}
members.append(member)
return members
def get_username(self, url):
html = self.request(url).text
soup = BeautifulSoup(html)
t = soup.find_all(class_='page-header')[0].find_all('h2')[0].text.strip()
return t.strip(u'的日记').strip()
def forum_id(self):
"""小组发帖要用的 forum_id"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
return soup.find(id='forum_id').attrs['value']
def new_topic(self, title, content):
"""小组发贴
:return: 帖子 id 或 ``None``
"""
data = {
'title': title,
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/%s/thread/' % self.forum_id()
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
def reply_topic(self, topic_id, content):
"""小组回帖
:return: 帖子 id 或 ``None``
"""
data = {
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/thread/%s/post/' % topic_id
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
|
mozillazg/python-shanbay | shanbay/team.py | Team.forum_id | python | def forum_id(self):
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
return soup.find(id='forum_id').attrs['value'] | 小组发帖要用的 forum_id | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/team.py#L249-L253 | null | class Team(object):
"""小组管理
:param shanbay: :class:`~shanbay.Shanbay` 实例对象
:param team_url: 小组首页 URL
::
>>> from shanbay import Shanbay, Team
>>> s = Shanbay('username', 'password')
>>> s.login()
>>> t = Team(s, 'http://www.shanbay.com/team/1234/')
"""
def __init__(self, shanbay, team_url):
self.shanbay = shanbay
self._request = shanbay._request
self.request = shanbay.request
self.team_url = team_url
self.team_id = self.get_url_id(team_url)
self.dismiss_url = 'http://www.shanbay.com/team/manage/'
def get_url_id(self, url):
return re.findall(r'/(\d+)/?$', url)[0]
def info(self):
"""小组信息
:return: 小组信息
:rtype: dict
返回值示例 ::
{
'title': u'title', # 标题
'leader': u'leader', # 组长
'date_created': datetime.datetime(2013, 10, 6, 0, 0), # 创建日期
'rank': 1000, # 排名
'number': 10, # 当前成员数
'max_number': 20, # 最大成员数
'rate': 1.112, # 打卡率
'points': 23 # 总成长值
}
"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
team_header = soup.find_all(class_='team-header')[0]
# 标题
title = team_header.find_all(class_='title')[0].text.strip()
# 组长
leader = team_header.find_all(class_='leader'
)[0].find_all('a')[0].text.strip()
# 创建时间
date_str = team_header.find_all(class_='date')[0].text.strip()
date_created = datetime.datetime.strptime(date_str, '%Y/%m/%d')
# 排名
team_stat = soup.find_all(class_='team-stat')[0]
_str = team_stat.find_all(class_='rank')[0].text.strip()
rank = int(re.findall(r'\d+$', _str)[0])
# 成员数
_str = team_stat.find_all(class_='size')[0].text.strip()
number, max_number = map(int, re.findall(r'(\d+)/(\d+)$', _str)[0])
# 打卡率
_str = team_stat.find_all(class_='rate')[0].text.strip()
rate = float(re.findall(r'(\d+\.?(?:\d+)?)%$', _str)[0])
# 总成长值
_str = team_stat.find_all(class_='points')[0].text.strip()
points = int(re.findall(r'\d+$', _str)[0])
return {
'title': title,
'leader': leader,
'date_created': date_created,
'rank': rank,
'number': number,
'max_number': max_number,
'rate': rate,
'points': points
}
def update_limit(self, days, kind=2, condition='>='):
"""更新成员加入条件
:rtype: bool
"""
url = 'http://www.shanbay.com/team/setqualification/%s' % self.team_id
data = {
'kind': kind,
'condition': condition,
'value': days,
'team': self.team_id,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
r = self.request(url, 'post', data=data)
return r.url == 'http://www.shanbay.com/referral/invite/?kind=team'
def members(self):
"""获取小组所有成员的信息列表"""
all_members = []
for page in range(1, self.max_page() + 1):
all_members.extend(self.single_page_members(page))
return all_members
def max_page(self):
"""获取小组成员管理页面的最大页数"""
html = self.request(self.dismiss_url).text
soup = BeautifulSoup(html)
# 分页所在 div
try:
pagination = soup.find_all(class_='pagination')[0]
except IndexError as e:
logger.exception(e)
return 1
pages = pagination.find_all('li')
return int(pages[-2].text) if pages else 1
def single_page_members(self, page_number=1):
"""获取单个页面内的小组成员信息
:param page_number: 页码
:return: 包含小组成员信息的列表
返回值示例: ::
[{
'id': 123, # member_id
'username': 'jim', # username
'nickname': 'Jim', # 昵称
'role': u'小组长', # 身份
'points': 1234, # 贡献成长值
'days': 100, # 组龄
'rate': 99.9, # 打卡率
'checked_yesterday': True, # 昨天是否打卡
'checked': False, # 今天是否打卡
}, {
# ...
}]
"""
url = '%s?page=%s' % (self.dismiss_url, page_number)
html = self.request(url).text
soup = BeautifulSoup(html)
members_html = soup.find(id='members')
if not members_html:
return []
def get_tag_string(html, class_, tag='td', n=0):
"""获取单个 tag 的文本数据"""
return html.find_all(tag, class_=class_)[n].get_text().strip()
members = []
# 获取成员信息
for member_html in members_html.find_all('tr', class_='member'):
_id = member_html.attrs['data-id']
try:
user_url = member_html.find_all('td', class_='user'
)[0].find('a').attrs['href']
username = self.get_username('http://www.shanbay.com'
+ user_url)
except Exception as e:
logger.exception(e)
username = ''
try:
nickname = get_tag_string(member_html, 'nickname', 'a')
except Exception as e:
logger.exception(e)
nickname = username
try:
role = member_html.find_all('td', class_='user'
)[0].find_all('span', class_='label'
)[0].get_text().strip()
except IndexError:
role = ''
except Exception as e:
logger.exception(e)
role = ''
member = {
'id': int(_id),
'username': username,
# 昵称
'nickname': nickname,
# 身份
'role': role,
# 贡献成长值
'points': int(get_tag_string(member_html, 'points')),
# 组龄
'days': int(get_tag_string(member_html, 'days')),
# 打卡率
'rate': float(get_tag_string(member_html, 'rate'
).split('%')[0]),
# 昨天是否打卡
'checked_yesterday': get_tag_string(member_html, 'checked'
) != '未打卡',
# 今天是否打卡
'checked': get_tag_string(member_html, 'checked',
n=1) != '未打卡',
}
members.append(member)
return members
def get_username(self, url):
html = self.request(url).text
soup = BeautifulSoup(html)
t = soup.find_all(class_='page-header')[0].find_all('h2')[0].text.strip()
return t.strip(u'的日记').strip()
def dismiss(self, member_ids):
"""踢人. 注意别把自己给踢了.
:param member_ids: 组员 ids
:return: bool
"""
url = 'http://www.shanbay.com/api/v1/team/member/'
data = {
'action': 'dispel',
}
if isinstance(member_ids, (list, tuple)):
data['ids'] = ','.join(map(str, member_ids))
else:
data['ids'] = member_ids
r = self.request(url, 'put', data=data)
try:
return r.json()['msg'] == "SUCCESS"
except Exception as e:
logger.exception(e)
return False
def new_topic(self, title, content):
"""小组发贴
:return: 帖子 id 或 ``None``
"""
data = {
'title': title,
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/%s/thread/' % self.forum_id()
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
def reply_topic(self, topic_id, content):
"""小组回帖
:return: 帖子 id 或 ``None``
"""
data = {
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/thread/%s/post/' % topic_id
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
|
mozillazg/python-shanbay | shanbay/team.py | Team.new_topic | python | def new_topic(self, title, content):
data = {
'title': title,
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/%s/thread/' % self.forum_id()
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id'] | 小组发贴
:return: 帖子 id 或 ``None`` | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/team.py#L255-L269 | [
"def forum_id(self):\n \"\"\"小组发帖要用的 forum_id\"\"\"\n html = self.request(self.team_url).text\n soup = BeautifulSoup(html)\n return soup.find(id='forum_id').attrs['value']\n"
] | class Team(object):
"""小组管理
:param shanbay: :class:`~shanbay.Shanbay` 实例对象
:param team_url: 小组首页 URL
::
>>> from shanbay import Shanbay, Team
>>> s = Shanbay('username', 'password')
>>> s.login()
>>> t = Team(s, 'http://www.shanbay.com/team/1234/')
"""
def __init__(self, shanbay, team_url):
self.shanbay = shanbay
self._request = shanbay._request
self.request = shanbay.request
self.team_url = team_url
self.team_id = self.get_url_id(team_url)
self.dismiss_url = 'http://www.shanbay.com/team/manage/'
def get_url_id(self, url):
return re.findall(r'/(\d+)/?$', url)[0]
def info(self):
"""小组信息
:return: 小组信息
:rtype: dict
返回值示例 ::
{
'title': u'title', # 标题
'leader': u'leader', # 组长
'date_created': datetime.datetime(2013, 10, 6, 0, 0), # 创建日期
'rank': 1000, # 排名
'number': 10, # 当前成员数
'max_number': 20, # 最大成员数
'rate': 1.112, # 打卡率
'points': 23 # 总成长值
}
"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
team_header = soup.find_all(class_='team-header')[0]
# 标题
title = team_header.find_all(class_='title')[0].text.strip()
# 组长
leader = team_header.find_all(class_='leader'
)[0].find_all('a')[0].text.strip()
# 创建时间
date_str = team_header.find_all(class_='date')[0].text.strip()
date_created = datetime.datetime.strptime(date_str, '%Y/%m/%d')
# 排名
team_stat = soup.find_all(class_='team-stat')[0]
_str = team_stat.find_all(class_='rank')[0].text.strip()
rank = int(re.findall(r'\d+$', _str)[0])
# 成员数
_str = team_stat.find_all(class_='size')[0].text.strip()
number, max_number = map(int, re.findall(r'(\d+)/(\d+)$', _str)[0])
# 打卡率
_str = team_stat.find_all(class_='rate')[0].text.strip()
rate = float(re.findall(r'(\d+\.?(?:\d+)?)%$', _str)[0])
# 总成长值
_str = team_stat.find_all(class_='points')[0].text.strip()
points = int(re.findall(r'\d+$', _str)[0])
return {
'title': title,
'leader': leader,
'date_created': date_created,
'rank': rank,
'number': number,
'max_number': max_number,
'rate': rate,
'points': points
}
def update_limit(self, days, kind=2, condition='>='):
"""更新成员加入条件
:rtype: bool
"""
url = 'http://www.shanbay.com/team/setqualification/%s' % self.team_id
data = {
'kind': kind,
'condition': condition,
'value': days,
'team': self.team_id,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
r = self.request(url, 'post', data=data)
return r.url == 'http://www.shanbay.com/referral/invite/?kind=team'
def members(self):
"""获取小组所有成员的信息列表"""
all_members = []
for page in range(1, self.max_page() + 1):
all_members.extend(self.single_page_members(page))
return all_members
def max_page(self):
"""获取小组成员管理页面的最大页数"""
html = self.request(self.dismiss_url).text
soup = BeautifulSoup(html)
# 分页所在 div
try:
pagination = soup.find_all(class_='pagination')[0]
except IndexError as e:
logger.exception(e)
return 1
pages = pagination.find_all('li')
return int(pages[-2].text) if pages else 1
def single_page_members(self, page_number=1):
"""获取单个页面内的小组成员信息
:param page_number: 页码
:return: 包含小组成员信息的列表
返回值示例: ::
[{
'id': 123, # member_id
'username': 'jim', # username
'nickname': 'Jim', # 昵称
'role': u'小组长', # 身份
'points': 1234, # 贡献成长值
'days': 100, # 组龄
'rate': 99.9, # 打卡率
'checked_yesterday': True, # 昨天是否打卡
'checked': False, # 今天是否打卡
}, {
# ...
}]
"""
url = '%s?page=%s' % (self.dismiss_url, page_number)
html = self.request(url).text
soup = BeautifulSoup(html)
members_html = soup.find(id='members')
if not members_html:
return []
def get_tag_string(html, class_, tag='td', n=0):
"""获取单个 tag 的文本数据"""
return html.find_all(tag, class_=class_)[n].get_text().strip()
members = []
# 获取成员信息
for member_html in members_html.find_all('tr', class_='member'):
_id = member_html.attrs['data-id']
try:
user_url = member_html.find_all('td', class_='user'
)[0].find('a').attrs['href']
username = self.get_username('http://www.shanbay.com'
+ user_url)
except Exception as e:
logger.exception(e)
username = ''
try:
nickname = get_tag_string(member_html, 'nickname', 'a')
except Exception as e:
logger.exception(e)
nickname = username
try:
role = member_html.find_all('td', class_='user'
)[0].find_all('span', class_='label'
)[0].get_text().strip()
except IndexError:
role = ''
except Exception as e:
logger.exception(e)
role = ''
member = {
'id': int(_id),
'username': username,
# 昵称
'nickname': nickname,
# 身份
'role': role,
# 贡献成长值
'points': int(get_tag_string(member_html, 'points')),
# 组龄
'days': int(get_tag_string(member_html, 'days')),
# 打卡率
'rate': float(get_tag_string(member_html, 'rate'
).split('%')[0]),
# 昨天是否打卡
'checked_yesterday': get_tag_string(member_html, 'checked'
) != '未打卡',
# 今天是否打卡
'checked': get_tag_string(member_html, 'checked',
n=1) != '未打卡',
}
members.append(member)
return members
def get_username(self, url):
html = self.request(url).text
soup = BeautifulSoup(html)
t = soup.find_all(class_='page-header')[0].find_all('h2')[0].text.strip()
return t.strip(u'的日记').strip()
def dismiss(self, member_ids):
"""踢人. 注意别把自己给踢了.
:param member_ids: 组员 ids
:return: bool
"""
url = 'http://www.shanbay.com/api/v1/team/member/'
data = {
'action': 'dispel',
}
if isinstance(member_ids, (list, tuple)):
data['ids'] = ','.join(map(str, member_ids))
else:
data['ids'] = member_ids
r = self.request(url, 'put', data=data)
try:
return r.json()['msg'] == "SUCCESS"
except Exception as e:
logger.exception(e)
return False
def forum_id(self):
"""小组发帖要用的 forum_id"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
return soup.find(id='forum_id').attrs['value']
def reply_topic(self, topic_id, content):
"""小组回帖
:return: 帖子 id 或 ``None``
"""
data = {
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/thread/%s/post/' % topic_id
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
|
mozillazg/python-shanbay | shanbay/team.py | Team.reply_topic | python | def reply_topic(self, topic_id, content):
data = {
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/thread/%s/post/' % topic_id
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id'] | 小组回帖
:return: 帖子 id 或 ``None`` | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/team.py#L271-L284 | null | class Team(object):
"""小组管理
:param shanbay: :class:`~shanbay.Shanbay` 实例对象
:param team_url: 小组首页 URL
::
>>> from shanbay import Shanbay, Team
>>> s = Shanbay('username', 'password')
>>> s.login()
>>> t = Team(s, 'http://www.shanbay.com/team/1234/')
"""
def __init__(self, shanbay, team_url):
self.shanbay = shanbay
self._request = shanbay._request
self.request = shanbay.request
self.team_url = team_url
self.team_id = self.get_url_id(team_url)
self.dismiss_url = 'http://www.shanbay.com/team/manage/'
def get_url_id(self, url):
return re.findall(r'/(\d+)/?$', url)[0]
def info(self):
"""小组信息
:return: 小组信息
:rtype: dict
返回值示例 ::
{
'title': u'title', # 标题
'leader': u'leader', # 组长
'date_created': datetime.datetime(2013, 10, 6, 0, 0), # 创建日期
'rank': 1000, # 排名
'number': 10, # 当前成员数
'max_number': 20, # 最大成员数
'rate': 1.112, # 打卡率
'points': 23 # 总成长值
}
"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
team_header = soup.find_all(class_='team-header')[0]
# 标题
title = team_header.find_all(class_='title')[0].text.strip()
# 组长
leader = team_header.find_all(class_='leader'
)[0].find_all('a')[0].text.strip()
# 创建时间
date_str = team_header.find_all(class_='date')[0].text.strip()
date_created = datetime.datetime.strptime(date_str, '%Y/%m/%d')
# 排名
team_stat = soup.find_all(class_='team-stat')[0]
_str = team_stat.find_all(class_='rank')[0].text.strip()
rank = int(re.findall(r'\d+$', _str)[0])
# 成员数
_str = team_stat.find_all(class_='size')[0].text.strip()
number, max_number = map(int, re.findall(r'(\d+)/(\d+)$', _str)[0])
# 打卡率
_str = team_stat.find_all(class_='rate')[0].text.strip()
rate = float(re.findall(r'(\d+\.?(?:\d+)?)%$', _str)[0])
# 总成长值
_str = team_stat.find_all(class_='points')[0].text.strip()
points = int(re.findall(r'\d+$', _str)[0])
return {
'title': title,
'leader': leader,
'date_created': date_created,
'rank': rank,
'number': number,
'max_number': max_number,
'rate': rate,
'points': points
}
def update_limit(self, days, kind=2, condition='>='):
"""更新成员加入条件
:rtype: bool
"""
url = 'http://www.shanbay.com/team/setqualification/%s' % self.team_id
data = {
'kind': kind,
'condition': condition,
'value': days,
'team': self.team_id,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
r = self.request(url, 'post', data=data)
return r.url == 'http://www.shanbay.com/referral/invite/?kind=team'
def members(self):
"""获取小组所有成员的信息列表"""
all_members = []
for page in range(1, self.max_page() + 1):
all_members.extend(self.single_page_members(page))
return all_members
def max_page(self):
"""获取小组成员管理页面的最大页数"""
html = self.request(self.dismiss_url).text
soup = BeautifulSoup(html)
# 分页所在 div
try:
pagination = soup.find_all(class_='pagination')[0]
except IndexError as e:
logger.exception(e)
return 1
pages = pagination.find_all('li')
return int(pages[-2].text) if pages else 1
def single_page_members(self, page_number=1):
"""获取单个页面内的小组成员信息
:param page_number: 页码
:return: 包含小组成员信息的列表
返回值示例: ::
[{
'id': 123, # member_id
'username': 'jim', # username
'nickname': 'Jim', # 昵称
'role': u'小组长', # 身份
'points': 1234, # 贡献成长值
'days': 100, # 组龄
'rate': 99.9, # 打卡率
'checked_yesterday': True, # 昨天是否打卡
'checked': False, # 今天是否打卡
}, {
# ...
}]
"""
url = '%s?page=%s' % (self.dismiss_url, page_number)
html = self.request(url).text
soup = BeautifulSoup(html)
members_html = soup.find(id='members')
if not members_html:
return []
def get_tag_string(html, class_, tag='td', n=0):
"""获取单个 tag 的文本数据"""
return html.find_all(tag, class_=class_)[n].get_text().strip()
members = []
# 获取成员信息
for member_html in members_html.find_all('tr', class_='member'):
_id = member_html.attrs['data-id']
try:
user_url = member_html.find_all('td', class_='user'
)[0].find('a').attrs['href']
username = self.get_username('http://www.shanbay.com'
+ user_url)
except Exception as e:
logger.exception(e)
username = ''
try:
nickname = get_tag_string(member_html, 'nickname', 'a')
except Exception as e:
logger.exception(e)
nickname = username
try:
role = member_html.find_all('td', class_='user'
)[0].find_all('span', class_='label'
)[0].get_text().strip()
except IndexError:
role = ''
except Exception as e:
logger.exception(e)
role = ''
member = {
'id': int(_id),
'username': username,
# 昵称
'nickname': nickname,
# 身份
'role': role,
# 贡献成长值
'points': int(get_tag_string(member_html, 'points')),
# 组龄
'days': int(get_tag_string(member_html, 'days')),
# 打卡率
'rate': float(get_tag_string(member_html, 'rate'
).split('%')[0]),
# 昨天是否打卡
'checked_yesterday': get_tag_string(member_html, 'checked'
) != '未打卡',
# 今天是否打卡
'checked': get_tag_string(member_html, 'checked',
n=1) != '未打卡',
}
members.append(member)
return members
def get_username(self, url):
html = self.request(url).text
soup = BeautifulSoup(html)
t = soup.find_all(class_='page-header')[0].find_all('h2')[0].text.strip()
return t.strip(u'的日记').strip()
def dismiss(self, member_ids):
"""踢人. 注意别把自己给踢了.
:param member_ids: 组员 ids
:return: bool
"""
url = 'http://www.shanbay.com/api/v1/team/member/'
data = {
'action': 'dispel',
}
if isinstance(member_ids, (list, tuple)):
data['ids'] = ','.join(map(str, member_ids))
else:
data['ids'] = member_ids
r = self.request(url, 'put', data=data)
try:
return r.json()['msg'] == "SUCCESS"
except Exception as e:
logger.exception(e)
return False
def forum_id(self):
"""小组发帖要用的 forum_id"""
html = self.request(self.team_url).text
soup = BeautifulSoup(html)
return soup.find(id='forum_id').attrs['value']
def new_topic(self, title, content):
"""小组发贴
:return: 帖子 id 或 ``None``
"""
data = {
'title': title,
'body': content,
'csrfmiddlewaretoken': self._request.cookies.get('csrftoken')
}
url = 'http://www.shanbay.com/api/v1/forum/%s/thread/' % self.forum_id()
r = self.request(url, 'post', data=data)
j = r.json()
if j['status_code'] == 0:
return j['data']['thread']['id']
|
mozillazg/python-shanbay | shanbay/__init__.py | Shanbay.login | python | def login(self, **kwargs):
payload = {
'username': self.username,
'password': self.password,
}
headers = kwargs.setdefault('headers', {})
headers.setdefault(
'Referer',
'https://www.shanbay.com/web/account/login'
)
url = 'https://www.shanbay.com/api/v1/account/login/web/'
response = self.request(url, 'put', json=payload, **kwargs)
r_json = response.json()
return r_json['status_code'] == 0 | 登录 | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/__init__.py#L68-L82 | [
"def request(self, url, method='get', **kwargs):\n headers = kwargs.setdefault('headers', {})\n headers.setdefault('User-Agent', self._attr('USER_AGENT'))\n headers.setdefault('X-CSRFToken', self.csrftoken)\n headers.setdefault('X-Requested-With', 'XMLHttpRequest')\n try:\n r = getattr(self._r... | class Shanbay(object):
"""
:param username: 用户名
:param password: 密码
::
>>> from shanbay import Shanbay
>>> s = Shanbay('username', 'password')
>>> s.login()
True
"""
USER_AGENT = 'python-shanbay/%s' % __version__
def __init__(self, username, password):
self._request = requests.Session()
self.username = username
self.password = password
self.csrftoken = ''
def _attr(self, name):
return getattr(self.__class__, name)
def request(self, url, method='get', **kwargs):
headers = kwargs.setdefault('headers', {})
headers.setdefault('User-Agent', self._attr('USER_AGENT'))
headers.setdefault('X-CSRFToken', self.csrftoken)
headers.setdefault('X-Requested-With', 'XMLHttpRequest')
try:
r = getattr(self._request, method)(url, **kwargs)
except requests.exceptions.RequestException as e:
raise ConnectException(e)
self.csrftoken = r.cookies.get('csrftoken', '')
content_type = r.headers.get('Content-Type', '')
if r.url.endswith('/accounts/login/') or \
(content_type.startswith('application/json') and
r.json()['status_code'] == 401):
raise AuthException('Need login')
return r
def server_date_utc(self):
"""获取扇贝网服务器时间(UTC 时间)"""
date_str = self.request('http://www.shanbay.com', 'head'
).headers['date']
date_utc = datetime.datetime.strptime(date_str,
'%a, %d %b %Y %H:%M:%S GMT')
return date_utc
def server_date(self):
"""获取扇贝网服务器时间(北京时间)"""
date_utc = self.server_date_utc()
# 北京时间 = UTC + 8 hours
return date_utc + datetime.timedelta(hours=8)
|
mozillazg/python-shanbay | shanbay/__init__.py | Shanbay.server_date_utc | python | def server_date_utc(self):
date_str = self.request('http://www.shanbay.com', 'head'
).headers['date']
date_utc = datetime.datetime.strptime(date_str,
'%a, %d %b %Y %H:%M:%S GMT')
return date_utc | 获取扇贝网服务器时间(UTC 时间) | train | https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/__init__.py#L84-L90 | [
"def request(self, url, method='get', **kwargs):\n headers = kwargs.setdefault('headers', {})\n headers.setdefault('User-Agent', self._attr('USER_AGENT'))\n headers.setdefault('X-CSRFToken', self.csrftoken)\n headers.setdefault('X-Requested-With', 'XMLHttpRequest')\n try:\n r = getattr(self._r... | class Shanbay(object):
"""
:param username: 用户名
:param password: 密码
::
>>> from shanbay import Shanbay
>>> s = Shanbay('username', 'password')
>>> s.login()
True
"""
USER_AGENT = 'python-shanbay/%s' % __version__
def __init__(self, username, password):
self._request = requests.Session()
self.username = username
self.password = password
self.csrftoken = ''
def _attr(self, name):
return getattr(self.__class__, name)
def request(self, url, method='get', **kwargs):
headers = kwargs.setdefault('headers', {})
headers.setdefault('User-Agent', self._attr('USER_AGENT'))
headers.setdefault('X-CSRFToken', self.csrftoken)
headers.setdefault('X-Requested-With', 'XMLHttpRequest')
try:
r = getattr(self._request, method)(url, **kwargs)
except requests.exceptions.RequestException as e:
raise ConnectException(e)
self.csrftoken = r.cookies.get('csrftoken', '')
content_type = r.headers.get('Content-Type', '')
if r.url.endswith('/accounts/login/') or \
(content_type.startswith('application/json') and
r.json()['status_code'] == 401):
raise AuthException('Need login')
return r
def login(self, **kwargs):
"""登录"""
payload = {
'username': self.username,
'password': self.password,
}
headers = kwargs.setdefault('headers', {})
headers.setdefault(
'Referer',
'https://www.shanbay.com/web/account/login'
)
url = 'https://www.shanbay.com/api/v1/account/login/web/'
response = self.request(url, 'put', json=payload, **kwargs)
r_json = response.json()
return r_json['status_code'] == 0
def server_date(self):
"""获取扇贝网服务器时间(北京时间)"""
date_utc = self.server_date_utc()
# 北京时间 = UTC + 8 hours
return date_utc + datetime.timedelta(hours=8)
|
atarashansky/self-assembling-manifold | SAM.py | SAM.preprocess_data | python | def preprocess_data(self, div=1, downsample=0, sum_norm=None,
include_genes=None, exclude_genes=None,
include_cells=None, exclude_cells=None,
norm='log', min_expression=1, thresh=0.01,
filter_genes=True):
# load data
try:
D= self.adata_raw.X
self.adata = self.adata_raw.copy()
except AttributeError:
print('No data loaded')
# filter cells
cell_names = np.array(list(self.adata_raw.obs_names))
idx_cells = np.arange(D.shape[0])
if(include_cells is not None):
include_cells = np.array(list(include_cells))
idx2 = np.where(np.in1d(cell_names, include_cells))[0]
idx_cells = np.array(list(set(idx2) & set(idx_cells)))
if(exclude_cells is not None):
exclude_cells = np.array(list(exclude_cells))
idx4 = np.where(np.in1d(cell_names, exclude_cells,
invert=True))[0]
idx_cells = np.array(list(set(idx4) & set(idx_cells)))
if downsample > 0:
numcells = int(D.shape[0] / downsample)
rand_ind = np.random.choice(np.arange(D.shape[0]),
size=numcells, replace=False)
idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
else:
numcells = D.shape[0]
mask_cells = np.zeros(D.shape[0], dtype='bool')
mask_cells[idx_cells] = True
self.adata = self.adata_raw[mask_cells,:].copy()
D = self.adata.X
if isinstance(D,np.ndarray):
D=sp.csr_matrix(D,dtype='float32')
else:
D=D.astype('float32')
D.sort_indices()
if(D.getformat() == 'csc'):
D=D.tocsr();
# sum-normalize
if (sum_norm == 'cell_median' and norm != 'multinomial'):
s = D.sum(1).A.flatten()
sum_norm = np.median(s)
D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
elif (sum_norm == 'gene_median' and norm != 'multinomial'):
s = D.sum(0).A.flatten()
sum_norm = np.median(s)
s[s==0]=1
D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
elif sum_norm is not None and norm != 'multinomial':
D = D.multiply(1 / D.sum(1).A.flatten()[:,
None] * sum_norm).tocsr()
# normalize
self.adata.X = D
if norm is None:
D.data[:] = (D.data / div)
elif(norm.lower() == 'log'):
D.data[:] = np.log2(D.data / div + 1)
elif(norm.lower() == 'ftt'):
D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
elif norm.lower() == 'multinomial':
ni = D.sum(1).A.flatten() #cells
pj = (D.sum(0) / D.sum()).A.flatten() #genes
col = D.indices
row=[]
for i in range(D.shape[0]):
row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
row = np.concatenate(row).astype('int32')
mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
mu2 = mu.copy()
mu2.data[:]=mu2.data**2
mu2 = mu2.multiply(1/ni[:,None])
mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
self.adata.X = mu
if sum_norm is None:
sum_norm = np.median(ni)
D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
D.data[:] = np.log2(D.data / div + 1)
else:
D.data[:] = (D.data / div)
# zero-out low-expressed genes
idx = np.where(D.data <= min_expression)[0]
D.data[idx] = 0
# filter genes
gene_names = np.array(list(self.adata.var_names))
idx_genes = np.arange(D.shape[1])
if(include_genes is not None):
include_genes = np.array(list(include_genes))
idx = np.where(np.in1d(gene_names, include_genes))[0]
idx_genes = np.array(list(set(idx) & set(idx_genes)))
if(exclude_genes is not None):
exclude_genes = np.array(list(exclude_genes))
idx3 = np.where(np.in1d(gene_names, exclude_genes,
invert=True))[0]
idx_genes = np.array(list(set(idx3) & set(idx_genes)))
if(filter_genes):
a, ct = np.unique(D.indices, return_counts=True)
c = np.zeros(D.shape[1])
c[a] = ct
keep = np.where(np.logical_and(c / D.shape[0] > thresh,
c / D.shape[0] <= 1 - thresh))[0]
idx_genes = np.array(list(set(keep) & set(idx_genes)))
mask_genes = np.zeros(D.shape[1], dtype='bool')
mask_genes[idx_genes] = True
self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
self.adata.X.eliminate_zeros()
self.adata.var['mask_genes']=mask_genes
if norm == 'multinomial':
self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
self.adata.layers['X_disp'].eliminate_zeros()
else:
self.adata.layers['X_disp'] = self.adata.X | Log-normalizes and filters the expression data.
Parameters
----------
div : float, optional, default 1
The factor by which the gene expression will be divided prior to
log normalization.
downsample : float, optional, default 0
The factor by which to randomly downsample the data. If 0, the
data will not be downsampled.
sum_norm : str or float, optional, default None
If a float, the total number of transcripts in each cell will be
normalized to this value prior to normalization and filtering.
Otherwise, nothing happens. If 'cell_median', each cell is
normalized to have the median total read count per cell. If
'gene_median', each gene is normalized to have the median total
read count per gene.
norm : str, optional, default 'log'
If 'log', log-normalizes the expression data. If 'ftt', applies the
Freeman-Tukey variance-stabilization transformation. If
'multinomial', applies the Pearson-residual transformation (this is
experimental and should only be used for raw, un-normalized UMI
datasets). If None, the data is not normalized.
include_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to keep.
All other genes will be filtered out. Gene names are case-
sensitive.
exclude_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to
exclude. These genes will be filtered out. Gene names are case-
sensitive.
include_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to keep.
All other cells will be filtered out. Cell names are
case-sensitive.
exclude_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to exclude.
Thses cells will be filtered out. Cell names are
case-sensitive.
min_expression : float, optional, default 1
The threshold above which a gene is considered
expressed. Gene expression values less than 'min_expression' are
set to zero.
thresh : float, optional, default 0.2
Keep genes expressed in greater than 'thresh'*100 % of cells and
less than (1-'thresh')*100 % of cells, where a gene is considered
expressed if its expression value exceeds 'min_expression'.
filter_genes : bool, optional, default True
Setting this to False turns off filtering operations aside from
removing genes with zero expression across all cells. Genes passed
in exclude_genes or not passed in include_genes will still be
filtered. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L143-L346 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
if isinstance(counts, tuple) or isinstance(counts, list):
raw_data, all_gene_names, all_cell_names = counts
if isinstance(raw_data, np.ndarray):
raw_data = sp.csr_matrix(raw_data)
self.adata_raw = AnnData(
X=raw_data, obs={
'obs_names': all_cell_names}, var={
'var_names': all_gene_names})
elif isinstance(counts, pd.DataFrame):
raw_data = sp.csr_matrix(counts.values)
all_gene_names = np.array(list(counts.columns.values))
all_cell_names = np.array(list(counts.index.values))
self.adata_raw = AnnData(
X=raw_data, obs={
'obs_names': all_cell_names}, var={
'var_names': all_gene_names})
elif isinstance(counts, AnnData):
all_cell_names=np.array(list(counts.obs_names))
all_gene_names=np.array(list(counts.var_names))
self.adata_raw = counts
elif counts is not None:
raise Exception(
"\'counts\' must be either a tuple/list of "
"(data,gene IDs,cell IDs) or a Pandas DataFrame of"
"cells x genes")
if(annotations is not None):
annotations = np.array(list(annotations))
if counts is not None:
self.adata_raw.obs['annotations'] = pd.Categorical(annotations)
if(counts is not None):
if(np.unique(all_gene_names).size != all_gene_names.size):
self.adata_raw.var_names_make_unique()
if(np.unique(all_cell_names).size != all_cell_names.size):
self.adata_raw.obs_names_make_unique()
self.adata = self.adata_raw.copy()
self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
include_genes=None, exclude_genes=None,
include_cells=None, exclude_cells=None,
norm='log', min_expression=1, thresh=0.01,
filter_genes=True):
"""Log-normalizes and filters the expression data.
Parameters
----------
div : float, optional, default 1
The factor by which the gene expression will be divided prior to
log normalization.
downsample : float, optional, default 0
The factor by which to randomly downsample the data. If 0, the
data will not be downsampled.
sum_norm : str or float, optional, default None
If a float, the total number of transcripts in each cell will be
normalized to this value prior to normalization and filtering.
Otherwise, nothing happens. If 'cell_median', each cell is
normalized to have the median total read count per cell. If
'gene_median', each gene is normalized to have the median total
read count per gene.
norm : str, optional, default 'log'
If 'log', log-normalizes the expression data. If 'ftt', applies the
Freeman-Tukey variance-stabilization transformation. If
'multinomial', applies the Pearson-residual transformation (this is
experimental and should only be used for raw, un-normalized UMI
datasets). If None, the data is not normalized.
include_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to keep.
All other genes will be filtered out. Gene names are case-
sensitive.
exclude_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to
exclude. These genes will be filtered out. Gene names are case-
sensitive.
include_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to keep.
All other cells will be filtered out. Cell names are
case-sensitive.
exclude_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to exclude.
Thses cells will be filtered out. Cell names are
case-sensitive.
min_expression : float, optional, default 1
The threshold above which a gene is considered
expressed. Gene expression values less than 'min_expression' are
set to zero.
thresh : float, optional, default 0.2
Keep genes expressed in greater than 'thresh'*100 % of cells and
less than (1-'thresh')*100 % of cells, where a gene is considered
expressed if its expression value exceeds 'min_expression'.
filter_genes : bool, optional, default True
Setting this to False turns off filtering operations aside from
removing genes with zero expression across all cells. Genes passed
in exclude_genes or not passed in include_genes will still be
filtered.
"""
# load data
try:
D= self.adata_raw.X
self.adata = self.adata_raw.copy()
except AttributeError:
print('No data loaded')
# filter cells
cell_names = np.array(list(self.adata_raw.obs_names))
idx_cells = np.arange(D.shape[0])
if(include_cells is not None):
include_cells = np.array(list(include_cells))
idx2 = np.where(np.in1d(cell_names, include_cells))[0]
idx_cells = np.array(list(set(idx2) & set(idx_cells)))
if(exclude_cells is not None):
exclude_cells = np.array(list(exclude_cells))
idx4 = np.where(np.in1d(cell_names, exclude_cells,
invert=True))[0]
idx_cells = np.array(list(set(idx4) & set(idx_cells)))
if downsample > 0:
numcells = int(D.shape[0] / downsample)
rand_ind = np.random.choice(np.arange(D.shape[0]),
size=numcells, replace=False)
idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
else:
numcells = D.shape[0]
mask_cells = np.zeros(D.shape[0], dtype='bool')
mask_cells[idx_cells] = True
self.adata = self.adata_raw[mask_cells,:].copy()
D = self.adata.X
if isinstance(D,np.ndarray):
D=sp.csr_matrix(D,dtype='float32')
else:
D=D.astype('float32')
D.sort_indices()
if(D.getformat() == 'csc'):
D=D.tocsr();
# sum-normalize
if (sum_norm == 'cell_median' and norm != 'multinomial'):
s = D.sum(1).A.flatten()
sum_norm = np.median(s)
D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
elif (sum_norm == 'gene_median' and norm != 'multinomial'):
s = D.sum(0).A.flatten()
sum_norm = np.median(s)
s[s==0]=1
D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
elif sum_norm is not None and norm != 'multinomial':
D = D.multiply(1 / D.sum(1).A.flatten()[:,
None] * sum_norm).tocsr()
# normalize
self.adata.X = D
if norm is None:
D.data[:] = (D.data / div)
elif(norm.lower() == 'log'):
D.data[:] = np.log2(D.data / div + 1)
elif(norm.lower() == 'ftt'):
D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
elif norm.lower() == 'multinomial':
ni = D.sum(1).A.flatten() #cells
pj = (D.sum(0) / D.sum()).A.flatten() #genes
col = D.indices
row=[]
for i in range(D.shape[0]):
row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
row = np.concatenate(row).astype('int32')
mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
mu2 = mu.copy()
mu2.data[:]=mu2.data**2
mu2 = mu2.multiply(1/ni[:,None])
mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
self.adata.X = mu
if sum_norm is None:
sum_norm = np.median(ni)
D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
D.data[:] = np.log2(D.data / div + 1)
else:
D.data[:] = (D.data / div)
# zero-out low-expressed genes
idx = np.where(D.data <= min_expression)[0]
D.data[idx] = 0
# filter genes
gene_names = np.array(list(self.adata.var_names))
idx_genes = np.arange(D.shape[1])
if(include_genes is not None):
include_genes = np.array(list(include_genes))
idx = np.where(np.in1d(gene_names, include_genes))[0]
idx_genes = np.array(list(set(idx) & set(idx_genes)))
if(exclude_genes is not None):
exclude_genes = np.array(list(exclude_genes))
idx3 = np.where(np.in1d(gene_names, exclude_genes,
invert=True))[0]
idx_genes = np.array(list(set(idx3) & set(idx_genes)))
if(filter_genes):
a, ct = np.unique(D.indices, return_counts=True)
c = np.zeros(D.shape[1])
c[a] = ct
keep = np.where(np.logical_and(c / D.shape[0] > thresh,
c / D.shape[0] <= 1 - thresh))[0]
idx_genes = np.array(list(set(keep) & set(idx_genes)))
mask_genes = np.zeros(D.shape[1], dtype='bool')
mask_genes[idx_genes] = True
self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
self.adata.X.eliminate_zeros()
self.adata.var['mask_genes']=mask_genes
if norm == 'multinomial':
self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
self.adata.layers['X_disp'].eliminate_zeros()
else:
self.adata.layers['X_disp'] = self.adata.X
def load_data(self, filename, transpose=True,
save_sparse_file='h5ad', sep=',', **kwargs):
"""Loads the specified data file. The file can be a table of
read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
as columns by default. The file can also be a pickle file (output from
'save_sparse_data') or an h5ad file (output from 'save_anndata').
This function that loads the file specified by 'filename'.
Parameters
----------
filename - string
The path to the tabular raw expression counts file.
sep - string, optional, default ','
The delimeter used to read the input data table. By default
assumes the input table is delimited by commas.
save_sparse_file - str, optional, default 'h5ad'
If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
(the native AnnData file format) to the same folder as the original
data for faster loading in the future. If 'p', pickles the sparse
data structure, cell names, and gene names in the same folder as
the original data for faster loading in the future.
transpose - bool, optional, default True
By default, assumes file is (genes x cells). Set this to False if
the file has dimensions (cells x genes).
"""
if filename.split('.')[-1] == 'p':
raw_data, all_cell_names, all_gene_names = (
pickle.load(open(filename, 'rb')))
if(transpose):
raw_data = raw_data.T
if raw_data.getformat()=='csc':
print("Converting sparse matrix to csr format...")
raw_data=raw_data.tocsr()
save_sparse_file = None
elif filename.split('.')[-1] != 'h5ad':
df = pd.read_csv(filename, sep=sep, index_col=0)
if(transpose):
dataset = df.T
else:
dataset = df
raw_data = sp.csr_matrix(dataset.values)
all_cell_names = np.array(list(dataset.index.values))
all_gene_names = np.array(list(dataset.columns.values))
if filename.split('.')[-1] != 'h5ad':
self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
var={'var_names': all_gene_names})
if(np.unique(all_gene_names).size != all_gene_names.size):
self.adata_raw.var_names_make_unique()
if(np.unique(all_cell_names).size != all_cell_names.size):
self.adata_raw.obs_names_make_unique()
self.adata = self.adata_raw.copy()
self.adata.layers['X_disp'] = raw_data
else:
self.adata_raw = anndata.read_h5ad(filename, **kwargs)
self.adata = self.adata_raw.copy()
if 'X_disp' not in list(self.adata.layers.keys()):
self.adata.layers['X_disp'] = self.adata.X
save_sparse_file = None
if(save_sparse_file == 'p'):
new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
path = filename[:filename.find(filename.split('/')[-1])]
self.save_sparse_data(path + new_sparse_file + '_sparse.p')
elif(save_sparse_file == 'h5ad'):
new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
path = filename[:filename.find(filename.split('/')[-1])]
self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
    def load_annotations(self, aname, sep=','):
        """Loads cell annotations.

        Loads the cell annotations specified by the 'aname' path. The file
        is re-read with different pandas options until its row count matches
        the number of cells in the raw data, accommodating files with or
        without a header row and with or without an index column.

        Parameters
        ----------
        aname - string
            The path to the annotations file. First column should be cell IDs
            and second column should be the desired annotations.
        sep - string, optional, default ','
            The field delimiter of the annotations file.
        """
        ann = pd.read_csv(aname)
        cell_names = np.array(list(self.adata.obs_names))
        all_cell_names = np.array(list(self.adata_raw.obs_names))
        if(ann.shape[1] > 1):
            # Multiple columns: treat the first column as the cell IDs.
            ann = pd.read_csv(aname, index_col=0, sep=sep)
            if(ann.shape[0] != all_cell_names.size):
                # Row count mismatch means the first row was data, not a header.
                ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
        else:
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, header=None, sep=sep)
        # Normalize the index to fixed-width unicode strings for lookup.
        ann.index = np.array(list(ann.index.astype('<U100')))
        # ann1: annotations for the filtered cells only; ann2: for all cells.
        ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
        ann2 = np.array(list(ann.values.flatten()))
        self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
        self.adata.obs['annotations'] = pd.Categorical(ann1)
    def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
        """Computes the spatial dispersion factors for each gene.

        Parameters
        ----------
        nnm - scipy.sparse, float
            Square cell-to-cell nearest-neighbor matrix.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This ensures
            that outlier genes do not significantly skew the weight
            distribution.

        Returns:
        -------
        weights - ndarray, float
            The vector of gene weights (also stored in
            adata.var['weights']).
        """
        # Smooth expression over the kNN graph; fills adata.layers['X_knn_avg'].
        self.knn_avg(nnm)
        D_avg = self.adata.layers['X_knn_avg']
        # Per-gene dispersion = variance / mean of the smoothed expression.
        mu, var = sf.mean_variance_axis(D_avg, axis=0)
        dispersions = np.zeros(var.size)
        dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
        self.adata.var['spatial_dispersions'] = dispersions.copy()
        # Clip at the mean of the top 'num_norm_avg' dispersions so a few
        # outlier genes cannot dominate the normalization.
        ma = np.sort(dispersions)[-num_norm_avg:].mean()
        dispersions[dispersions >= ma] = ma
        weights = ((dispersions / dispersions.max())**0.5).flatten()
        self.adata.var['weights'] = weights
        return weights
    def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
        """Computes the contribution of the gene IDs in 'genes' to each
        principal component (PC) of the filtered expression data as the mean of
        the absolute value of the corresponding gene loadings. High values
        correspond to PCs that are highly correlated with the features in
        'genes'. These PCs can then be regressed out of the data using
        'regress_genes'.

        Parameters
        ----------
        genes - numpy.array or list
            Genes for which contribution to each PC will be calculated.

        npcs - int, optional, default None
            How many PCs to calculate when computing PCA of the filtered and
            log-transformed expression data. If None, calculate all PCs.

        plot - bool, optional, default False
            If True, plot the scores reflecting how correlated each PC is with
            genes of interest. Otherwise, do not plot anything.

        Returns:
        -------
        x - numpy.array
            Scores reflecting how correlated each PC is with the genes of
            interest (ordered by decreasing eigenvalues). None if 'genes'
            is None.
        """
        from sklearn.decomposition import PCA
        if npcs is None:
            npcs = self.adata.X.shape[0]
        pca = PCA(n_components=npcs)
        pc = pca.fit_transform(self.adata.X.toarray())
        # Cache the fitted PCA and cell scores for later use by 'regress_genes'.
        self.regression_pca = pca
        self.regression_pcs = pc
        gene_names = np.array(list(self.adata.var_names))
        if(genes is not None):
            idx = np.where(np.in1d(gene_names, genes))[0]
            sx = pca.components_[:, idx]
            # Mean absolute loading of the selected genes on each PC.
            x = np.abs(sx).mean(1)
            if plot:
                plt.figure()
                plt.plot(x)
            return x
        else:
            return
    def regress_genes(self, PCs):
        """Regress out the principal components in 'PCs' from the filtered
        expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
        been previously called.

        Parameters
        ----------
        PCs - int, numpy.array, list
            The principal components to regress out of the expression data.
        """
        # Normalize the input to a flat index array regardless of its type.
        ind = [PCs]
        ind = np.array(ind).flatten()
        try:
            # Weighted reconstruction subtracted from the data.
            # NOTE(review): BaseException is overly broad here -- it also
            # swallows KeyboardInterrupt/SystemExit; Exception would suffice.
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :] * self.adata.var[
                    'weights'].values)
        except BaseException:
            # Fallback: unweighted reconstruction (e.g. if weights are absent).
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :])
        self.adata.X = sp.csr_matrix(y)
    def run(self,
            max_iter=10,
            verbose=True,
            projection='umap',
            stopping_condition=5e-3,
            num_norm_avg=50,
            k=20,
            distance='correlation',
            preprocessing='Normalizer',
            proj_kwargs={}):
        """Runs the Self-Assembling Manifold algorithm.

        Parameters
        ----------
        k - int, optional, default 20
            The number of nearest neighbors to identify for each cell.

        distance : string, optional, default 'correlation'
            The distance metric to use when constructing cell distance
            matrices. Can be any of the distance metrics supported by
            sklearn's 'pdist'.

        max_iter - int, optional, default 10
            The maximum number of iterations SAM will run.

        stopping_condition - float, optional, default 5e-3
            The stopping condition threshold for the RMSE between gene weights
            in adjacent iterations.

        verbose - bool, optional, default True
            If True, the iteration number and error between gene weights in
            adjacent iterations will be displayed.

        projection - str, optional, default 'umap'
            If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
            embedding. Otherwise, no embedding will be generated.

        preprocessing - str, optional, default 'Normalizer'
            If 'Normalizer', use sklearn.preprocessing.Normalizer, which
            normalizes expression data prior to PCA such that each cell has
            unit L2 norm. If 'StandardScaler', use
            sklearn.preprocessing.StandardScaler, which normalizes expression
            data prior to PCA such that each gene has zero mean and unit
            variance. Otherwise, do not normalize the expression data. We
            recommend using 'StandardScaler' for large datasets and
            'Normalizer' otherwise.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This prevents
            genes with large spatial dispersions from skewing the distribution
            of weights.

        proj_kwargs - dict, optional, default {}
            A dictionary of keyword arguments to pass to the projection
            functions.
            NOTE(review): mutable default argument; safe only because it is
            never mutated here, only unpacked.
        """
        self.distance = distance
        D = self.adata.X
        self.k = k
        # Clamp k to [5, 100] and below the number of cells.
        if(self.k < 5):
            self.k = 5
        elif(self.k > 100):
            self.k = 100
        if(self.k > D.shape[0] - 1):
            self.k = D.shape[0] - 2
        numcells = D.shape[0]
        # Cap the number of genes used, shrinking as the dataset grows.
        n_genes = 8000
        if numcells > 3000 and n_genes > 3000:
            n_genes = 3000
        elif numcells > 2000 and n_genes > 4500:
            n_genes = 4500
        elif numcells > 1000 and n_genes > 6000:
            n_genes = 6000
        elif n_genes > 8000:
            n_genes = 8000
        # Number of PCs also scales with dataset size.
        npcs = None
        if npcs is None and numcells > 3000:
            npcs = 150
        elif npcs is None and numcells > 2000:
            npcs = 250
        elif npcs is None and numcells > 1000:
            npcs = 350
        elif npcs is None:
            npcs = 500
        tinit = time.time()
        # Bootstrap with a random kNN graph: each cell is linked to itself
        # plus (k-1) random cells.
        edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        nums = np.arange(edm.shape[1])
        RINDS = np.random.randint(
            0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                           (self.k - 1)))
        RINDS = np.hstack((nums[:, None], RINDS))
        edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                    (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
        edm = edm.tocsr()
        print('RUNNING SAM')
        # Initial gene weights from the random graph.
        W = self.dispersion_ranking_NN(
            edm, num_norm_avg=1)
        old = np.zeros(W.size)
        new = W
        i = 0
        err = ((new - old)**2).mean()**0.5
        if max_iter < 5:
            max_iter = 5
        nnas = num_norm_avg
        # Keep the neighbor graphs and weight vectors from every iteration.
        self.Ns=[edm]
        self.Ws = [W]
        # Iterate until the RMSE between successive weight vectors converges.
        while (i < max_iter and err > stopping_condition):
            conv = err
            if(verbose):
                print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))
            i += 1
            old = new
            W, wPCA_data, EDM, = self.calculate_nnm(
                D, W, n_genes, preprocessing, npcs, numcells, nnas)
            new = W
            err = ((new - old)**2).mean()**0.5
            self.Ns.append(EDM)
            self.Ws.append(W)
        # One final pass with the converged weights.
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        self.Ns.append(EDM)
        all_gene_names = np.array(list(self.adata.var_names))
        indices = np.argsort(-W)
        ranked_genes = all_gene_names[indices]
        self.corr_bin_genes(number_of_features=1000)
        # Store the final outputs on the AnnData object.
        self.adata.uns['ranked_genes'] = ranked_genes
        self.adata.obsm['X_pca'] = wPCA_data
        self.adata.uns['neighbors'] = {}
        self.adata.uns['neighbors']['connectivities'] = EDM
        if(projection == 'tsne'):
            print('Computing the t-SNE embedding...')
            self.run_tsne(**proj_kwargs)
        elif(projection == 'umap'):
            print('Computing the UMAP embedding...')
            self.run_umap(**proj_kwargs)
        elif(projection == 'diff_umap'):
            print('Computing the diffusion UMAP embedding...')
            self.run_diff_umap(**proj_kwargs)
        elapsed = time.time() - tinit
        if verbose:
            print('Elapsed time: ' + str(elapsed) + ' seconds')
    def calculate_nnm(
            self,
            D,
            W,
            n_genes,
            preprocessing,
            npcs,
            numcells,
            num_norm_avg):
        """Perform one SAM iteration: select and rescale the top-weighted
        genes, compute a weighted PCA, rebuild the nearest-neighbor graph,
        and recompute the gene weights from it.

        Returns (updated weights, weighted PCA coordinates, sparse
        nearest-neighbor matrix).
        """
        if(n_genes is None):
            gkeep = np.arange(W.size)
        else:
            # Keep only the top 'n_genes' genes by current weight.
            gkeep = np.sort(np.argsort(-W)[:n_genes])
        if preprocessing == 'Normalizer':
            Ds = D[:, gkeep].toarray()
            Ds = Normalizer().fit_transform(Ds)
        elif preprocessing == 'StandardScaler':
            Ds = D[:, gkeep].toarray()
            Ds = StandardScaler(with_mean=True).fit_transform(Ds)
            # Clip extreme z-scores to limit outlier influence.
            Ds[Ds > 5] = 5
            Ds[Ds < -5] = -5
        else:
            Ds = D[:, gkeep].toarray()
        # Rescale expression by the current gene weights.
        D_sub = Ds * (W[gkeep])
        if numcells > 500:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='auto')
        else:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='full')
        if self.distance == 'euclidean':
            g_weighted = Normalizer().fit_transform(g_weighted)
        self.adata.uns['pca_obj'] = pca
        EDM = self.calc_nnm(g_weighted)
        W = self.dispersion_ranking_NN(
            EDM, num_norm_avg=num_norm_avg)
        self.adata.uns['X_processed'] = D_sub
        return W, g_weighted, EDM
    def calc_nnm(self,g_weighted):
        """Build a sparse binary k-nearest-neighbor matrix from the weighted
        PCA coordinates, using approximate search for large datasets and an
        exact pairwise distance matrix otherwise.
        """
        numcells=g_weighted.shape[0]
        if g_weighted.shape[0] > 8000:
            # Approximate nearest neighbors for large datasets.
            nnm, dists = ut.nearest_neighbors(
                g_weighted, n_neighbors=self.k, metric=self.distance)
            EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
            EDM[np.tile(np.arange(nnm.shape[0])[:, None],
                        (1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
            EDM = EDM.tocsr()
        else:
            # Exact neighbors from the dense pairwise distance matrix.
            dist = ut.compute_distances(g_weighted, self.distance)
            nnm = ut.dist_to_nn(dist, self.k)
            EDM = sp.csr_matrix(nnm)
        return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
    def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
        """Plots orthogonal expression patterns.

        In the default mode, plots orthogonal gene expression patterns. A
        specific correlated group of genes can be specified to plot gene
        expression patterns within that group.

        Requires 'corr_bin_genes' to have populated
        adata.uns['gene_groups'] (done automatically by 'run').

        Parameters
        ----------
        group - int, optional, default None
            If specified, display the genes within the desired correlated
            group. Otherwise, display the top ranked gene within each distinct
            correlated group.

        n_genes - int, optional, default 5
            The number of top ranked genes to display within a correlated
            group if 'group' is specified.

        **kwargs -
            All keyword arguments in 'show_gene_expression' and 'scatter'
            are eligible.
        """
        geneID_groups = self.adata.uns['gene_groups']
        if(group is None):
            # One plot per group: the top-ranked gene of each.
            for i in range(len(geneID_groups)):
                self.show_gene_expression(geneID_groups[i][0], **kwargs)
        else:
            for i in range(n_genes):
                self.show_gene_expression(geneID_groups[group][i], **kwargs)
    def plot_correlated_genes(
            self,
            name,
            n_genes=5,
            number_of_features=1000,
            **kwargs):
        """Plots gene expression patterns correlated with the input gene.

        Parameters
        ----------
        name - string
            The name of the gene with respect to which correlated gene
            expression patterns will be displayed.

        n_genes - int, optional, default 5
            The number of top ranked correlated genes to display.

        number_of_features - int, optional, default 1000
            Number of genes to bin when computing correlated groups (passed
            through to 'corr_bin_genes').

        **kwargs -
            All keyword arguments in 'show_gene_expression' and 'scatter'
            are eligible.

        Returns
        -------
        The correlated genes (excluding the query gene itself), or None if
        the gene is not present in the filtered data.
        """
        all_gene_names = np.array(list(self.adata.var_names))
        if((all_gene_names == name).sum() == 0):
            print(
                "Gene not found in the filtered dataset. Note that genes "
                "are case sensitive.")
            return
        sds = self.corr_bin_genes(
            input_gene=name,
            number_of_features=number_of_features)
        # Index 0 is the query gene itself, so plotting starts at 1.
        if (n_genes + 1 > sds.size):
            x = sds.size
        else:
            x = n_genes + 1
        for i in range(1, x):
            self.show_gene_expression(sds[i], **kwargs)
        return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
"""A (hacky) method for binning groups of genes correlated along the
SAM manifold.
Parameters
----------
number_of_features - int, optional, default None
The number of genes to bin. Capped at 5000 due to memory
considerations.
input_gene - str, optional, default None
If not None, use this gene as the first seed when growing the
correlation bins.
"""
weights = self.adata.var['spatial_dispersions'].values
all_gene_names = np.array(list(self.adata.var_names))
D_avg = self.adata.layers['X_knn_avg']
idx2 = np.argsort(-weights)[:weights[weights > 0].size]
if(number_of_features is None or number_of_features > idx2.size):
number_of_features = idx2.size
if number_of_features > 1000:
number_of_features = 1000
if(input_gene is not None):
input_gene = np.where(all_gene_names == input_gene)[0]
if(input_gene.size == 0):
print(
"Gene note found in the filtered dataset. Note "
"that genes are case sensitive.")
return
seeds = [np.array([input_gene])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(all_gene_names[seeds[i]])
return geneID_groups[0]
else:
seeds = [np.array([idx2[0]])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(
all_gene_names[seeds[i]])
self.adata.uns['gene_groups'] = geneID_groups
return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
"""Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' is set to 'precomputed' by default,
implying that this function expects a distance matrix by default.
"""
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d
def run_umap(self, X=None, metric=None, **kwargs):
"""Wrapper for umap-learn.
See https://github.com/lmcinnes/umap sklearn for the documentation
and source code.
"""
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d
    def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
                      method='gauss', **kwargs):
        """
        Experimental -- running UMAP on the diffusion components

        Builds a scanpy neighbor graph on 'use_rep', computes a diffusion
        map, rebuilds the neighbor graph in diffusion space, and then runs
        scanpy's UMAP on it.

        NOTE(review): the 'metric' parameter is never used -- the first
        neighbor graph uses self.distance and the second uses 'euclidean'.
        """
        import scanpy.api as sc
        sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
                        metric=self.distance,method=method)
        sc.tl.diffmap(self.adata, n_comps=n_comps)
        sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
                                metric='euclidean',method=method)
        # Preserve any existing SAM UMAP before scanpy overwrites 'X_umap'.
        if 'X_umap' in self.adata.obsm.keys():
            self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
        sc.tl.umap(self.adata,min_dist=0.1,copy=False)
def knn_avg(self, nnm=None):
if (nnm is None):
nnm = self.adata.uns['neighbors']['connectivities']
D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])
self.adata.layers['X_knn_avg'] = D_avg
    def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
                edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
        """Display a scatter plot.

        Displays a scatter plot using the SAM projection or another input
        projection with or without annotations.

        Parameters
        ----------
        projection - ndarray of floats, optional, default None
            An N x 2 matrix, where N is the number of data points. If None,
            use an existing SAM projection (default t-SNE). Can take on values
            'umap' or 'tsne' to specify either the SAM UMAP embedding or
            SAM t-SNE embedding.

        c - ndarray or str, optional, default None
            Colors for each cell in the scatter plot. Can be a vector of
            floats or strings for cell annotations. Can also be a key
            for sam.adata.obs (i.e. 'louvain_clusters').

        axes - matplotlib axis, optional, default None
            Plot output to the specified, existing axes. If None, create new
            figure window.

        cmap - string, optional, default 'rainbow'
            The colormap to use for the input color values.

        colorbar - bool, optional default True
            If True, display a colorbar indicating which values / annotations
            correspond to which color in the scatter plot.

        Keyword arguments -
            All other keyword arguments that can be passed into
            matplotlib.pyplot.scatter can be used.
        """
        if (not PLOTTING):
            print("matplotlib not installed!")
        else:
            if(isinstance(projection, str)):
                try:
                    dt = self.adata.obsm[projection]
                except KeyError:
                    # NOTE(review): on this path 'dt' stays unbound and the
                    # code below raises NameError instead of returning.
                    print('Please create a projection first using run_umap or'
                          'run_tsne')
            elif(projection is None):
                # Prefer UMAP, then fall back to t-SNE.
                try:
                    dt = self.adata.obsm['X_umap']
                except KeyError:
                    try:
                        dt = self.adata.obsm['X_tsne']
                    except KeyError:
                        print("Please create either a t-SNE or UMAP projection"
                              "first.")
                        return
            else:
                dt = projection
            if(axes is None):
                plt.figure()
                axes = plt.gca()
            if(c is None):
                plt.scatter(dt[:, 0], dt[:, 1], s=s,
                            linewidth=linewidth, edgecolor=edgecolor, **kwargs)
            else:
                # A string 'c' is interpreted as a key into adata.obs.
                if isinstance(c, str):
                    try:
                        c = self.adata.obs[c].get_values()
                    except KeyError:
                        0  # do nothing; fall through with 'c' as-is
                if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
                        (isinstance(c, np.ndarray) or isinstance(c, list))):
                    # Categorical annotations: map labels to integers for
                    # coloring and label the colorbar ticks with the strings.
                    i = ut.convert_annotations(c)
                    ui, ai = np.unique(i, return_index=True)
                    cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)
                    if(colorbar):
                        cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                        cbar.ax.set_yticklabels(c[ai])
                else:
                    # Numeric values (or a scalar, in which case no colorbar).
                    if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                        colorbar = False
                    i = c
                    cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)
                    if(colorbar):
                        plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name)
    def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
        """Cluster cells with DBSCAN.

        If 'X' is None, clusters the stored UMAP embedding and saves the
        labels to adata.obs['density_clusters']; otherwise clusters the
        given coordinates and returns the labels. Cells DBSCAN marks as
        noise (-1) are reassigned to the majority cluster among their
        self.k nearest clustered neighbors.
        """
        from sklearn.cluster import DBSCAN
        if X is None:
            X = self.adata.obsm['X_umap']
            save = True
        else:
            save = False
        cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            # Vote: for each noise cell, count its k nearest clustered
            # neighbors per cluster and take the argmax.
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
            cl[idx1] = np.argmax(nnmc, axis=1)
        if save:
            self.adata.obs['density_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def louvain_clustering(self, X=None, res=1, method='modularity'):
        """Runs Louvain clustering using the vtraag implementation. Assumes
        that 'louvain' optional dependency is installed.

        If 'X' is None, clusters the stored SAM neighbor graph and saves
        the labels to adata.obs['louvain_clusters']; otherwise clusters the
        given matrix and returns the labels.

        Parameters
        ----------
        res - float, optional, default 1
            The resolution parameter which tunes the number of clusters Louvain
            finds.

        method - str, optional, default 'modularity'
            Can be 'modularity' or 'significance', which are two different
            optimizing functions in the Louvain algorithm.
        """
        if X is None:
            X = self.adata.uns['neighbors']['connectivities']
            save = True
        else:
            if not sp.isspmatrix_csr(X):
                X = sp.csr_matrix(X)
            save = False
        import igraph as ig
        import louvain
        # Weighted adjacency built from the shared-neighbor product
        # X.X^T / k, sparsified to k entries per row by sparse_knn.
        adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
        sources, targets = adjacency.nonzero()
        weights = adjacency[sources, targets]
        if isinstance(weights, np.matrix):
            weights = weights.A1
        g = ig.Graph(directed=True)
        g.add_vertices(adjacency.shape[0])
        g.add_edges(list(zip(sources, targets)))
        try:
            g.es['weight'] = weights
        except BaseException:
            # Best-effort: fall back to an unweighted graph.
            pass
        if method == 'significance':
            cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
        else:
            cl = louvain.find_partition(
                g,
                louvain.RBConfigurationVertexPartition,
                resolution_parameter=res)
        if save:
            self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
        else:
            return np.array(cl.membership)
    def kmeans_clustering(self, numc, X=None, npcs=15):
        """Performs k-means clustering.

        If 'X' is None, the processed data is projected onto the top
        'npcs' principal components and the labels are saved to
        adata.obs['kmeans_clusters']; otherwise the given coordinates are
        clustered and the labels returned.

        Parameters
        ----------
        numc - int
            Number of clusters

        npcs - int, optional, default 15
            Number of principal components to use as input for k-means
            clustering.
        """
        from sklearn.cluster import KMeans
        if X is None:
            D_sub = self.adata.uns['X_processed']
            # Project the mean-centered processed data onto the stored PCs.
            X = (
                D_sub -
                D_sub.mean(0)).dot(
                self.adata.uns['pca_obj'].components_[
                    :npcs,
                    :].T)
            save = True
        else:
            save = False
        # Rows are L2-normalized before clustering.
        cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
        if save:
            self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def leiden_clustering(self, X=None, res = 1):
        """Runs Leiden clustering via scanpy.

        If 'X' is None, clusters the graph stored on self.adata and saves
        integer labels to adata.obs['leiden_clusters']; otherwise the given
        adjacency is clustered and labels saved to
        adata.obs['leiden_clusters_X'].
        """
        import scanpy.api as sc
        if X is None:
            sc.tl.leiden(self.adata, resolution = res,
                         key_added='leiden_clusters')
            self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
                'leiden_clusters'].get_values().astype('int'))
        else:
            sc.tl.leiden(self.adata, resolution = res, adjacency = X,
                         key_added='leiden_clusters_X')
            self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
                'leiden_clusters_X'].get_values().astype('int'))
    def hdbknn_clustering(self, X=None, k=None, **kwargs):
        """Cluster with HDBSCAN, then reassign noise cells to their nearest
        cluster by k-nearest-neighbor majority vote.

        If 'X' is None, clusters the processed data projected onto the top
        15 PCs (row-normalized) and saves the labels to
        adata.obs['hdbknn_clusters']; otherwise clusters the given
        coordinates and returns the labels.
        """
        import hdbscan
        if X is None:
            #X = self.adata.obsm['X_pca']
            D = self.adata.uns['X_processed']
            X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
            X = Normalizer().fit_transform(X)
            save = True
        else:
            save = False
        if k is None:
            k = 20#self.k
        hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
        cl = hdb.fit_predict(X)
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            # Reassign noise points (-1) to the majority cluster among
            # their k nearest clustered neighbors.
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
            cl[idx1] = np.argmax(nnmc, axis=1)
        if save:
            self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
        else:
            return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
"""
Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers
    def identify_marker_genes_ratio(self, labels=None):
        """
        Ranks marker genes for each cluster using a SAM-weighted
        expression-ratio approach (works quite well).

        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.

        Returns
        -------
        markers - dict
            Maps each cluster label to its ranked marker genes (also stored
            in adata.uns['marker_genes_ratio']).
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels
        all_gene_names = np.array(list(self.adata.var_names))
        markers={}
        # Per-gene total expression across all cells.
        s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
        lblsu=np.unique(lbls)
        for i in lblsu:
            # Per-gene expression summed over the cells of cluster i.
            d = np.array(self.adata.layers['X_disp']
                         [lbls == i, :].sum(0)).flatten()
            rat = np.zeros(d.size)
            # Score: (cluster sum)^2 / total sum, scaled by SAM weight.
            rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
                self.adata.var['weights'].values[s > 0]
            x = np.argsort(-rat)
            markers[i] = all_gene_names[x[:]]
        self.adata.uns['marker_genes_ratio'] = markers
        return markers
    def identify_marker_genes_corr(self, labels=None, n_genes=4000):
        """
        Ranking marker genes based on their respective magnitudes in the
        correlation dot products with cluster-specific reference expression
        profiles.

        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.

        n_genes - int, optional, default 4000
            By default, computes correlations on the top 4000 SAM-weighted genes.

        Returns
        -------
        markers - dict
            Maps each cluster label to its ranked marker genes (also stored
            in adata.uns['marker_genes_corr']).
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels
        w=self.adata.var['weights'].values
        s = StandardScaler()
        idxg = np.argsort(-w)[:n_genes]
        # Z-score the top-weighted genes, rescaled by their SAM weights.
        y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
        all_gene_names = np.array(list(self.adata.var_names))[idxg]
        markers = {}
        lblsu=np.unique(lbls)
        for i in lblsu:
            Gcells = np.array(list(self.adata.obs_names[lbls==i]))
            z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
            # Standardize each cell profile and correlate with the cluster
            # mean reference profile; average over the cluster's cells.
            m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
            ref = z1.mean(0)
            ref = (ref-ref.mean())/ref.std()
            g2 = (m1*ref).mean(0)
            markers[i] = all_gene_names[np.argsort(-g2)]
        self.adata.uns['marker_genes_corr'] = markers
        return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.load_data | python | def load_data(self, filename, transpose=True,
save_sparse_file='h5ad', sep=',', **kwargs):
if filename.split('.')[-1] == 'p':
raw_data, all_cell_names, all_gene_names = (
pickle.load(open(filename, 'rb')))
if(transpose):
raw_data = raw_data.T
if raw_data.getformat()=='csc':
print("Converting sparse matrix to csr format...")
raw_data=raw_data.tocsr()
save_sparse_file = None
elif filename.split('.')[-1] != 'h5ad':
df = pd.read_csv(filename, sep=sep, index_col=0)
if(transpose):
dataset = df.T
else:
dataset = df
raw_data = sp.csr_matrix(dataset.values)
all_cell_names = np.array(list(dataset.index.values))
all_gene_names = np.array(list(dataset.columns.values))
if filename.split('.')[-1] != 'h5ad':
self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
var={'var_names': all_gene_names})
if(np.unique(all_gene_names).size != all_gene_names.size):
self.adata_raw.var_names_make_unique()
if(np.unique(all_cell_names).size != all_cell_names.size):
self.adata_raw.obs_names_make_unique()
self.adata = self.adata_raw.copy()
self.adata.layers['X_disp'] = raw_data
else:
self.adata_raw = anndata.read_h5ad(filename, **kwargs)
self.adata = self.adata_raw.copy()
if 'X_disp' not in list(self.adata.layers.keys()):
self.adata.layers['X_disp'] = self.adata.X
save_sparse_file = None
if(save_sparse_file == 'p'):
new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
path = filename[:filename.find(filename.split('/')[-1])]
self.save_sparse_data(path + new_sparse_file + '_sparse.p')
elif(save_sparse_file == 'h5ad'):
new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
path = filename[:filename.find(filename.split('/')[-1])]
self.save_anndata(path + new_sparse_file + '_SAM.h5ad') | Loads the specified data file. The file can be a table of
read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
as columns by default. The file can also be a pickle file (output from
'save_sparse_data') or an h5ad file (output from 'save_anndata').
This function that loads the file specified by 'filename'.
Parameters
----------
filename - string
The path to the tabular raw expression counts file.
sep - string, optional, default ','
The delimeter used to read the input data table. By default
assumes the input table is delimited by commas.
save_sparse_file - str, optional, default 'h5ad'
If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
(the native AnnData file format) to the same folder as the original
data for faster loading in the future. If 'p', pickles the sparse
data structure, cell names, and gene names in the same folder as
the original data for faster loading in the future.
transpose - bool, optional, default True
By default, assumes file is (genes x cells). Set this to False if
the file has dimensions (cells x genes). | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L348-L427 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
    def __init__(self, counts=None, annotations=None):
        # Normalize the accepted input forms (tuple/list, DataFrame, AnnData)
        # into an AnnData stored in 'adata_raw'; 'adata' is a working copy.
        if isinstance(counts, tuple) or isinstance(counts, list):
            # (matrix, gene IDs, cell IDs) triplet; convert dense input to CSR.
            raw_data, all_gene_names, all_cell_names = counts
            if isinstance(raw_data, np.ndarray):
                raw_data = sp.csr_matrix(raw_data)
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        elif isinstance(counts, pd.DataFrame):
            # DataFrame input: rows are cells (index), columns are genes.
            raw_data = sp.csr_matrix(counts.values)
            all_gene_names = np.array(list(counts.columns.values))
            all_cell_names = np.array(list(counts.index.values))
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        elif isinstance(counts, AnnData):
            # AnnData input is adopted as-is (not copied).
            all_cell_names=np.array(list(counts.obs_names))
            all_gene_names=np.array(list(counts.var_names))
            self.adata_raw = counts
        elif counts is not None:
            raise Exception(
                "\'counts\' must be either a tuple/list of "
                "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
                "cells x genes")

        if(annotations is not None):
            annotations = np.array(list(annotations))
            if counts is not None:
                self.adata_raw.obs['annotations'] = pd.Categorical(annotations)

        if(counts is not None):
            # De-duplicate IDs so AnnData name-based indexing stays unambiguous.
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()

            self.adata = self.adata_raw.copy()
            # 'X_disp' holds the expression values used for display/averaging.
            self.adata.layers['X_disp'] = self.adata.X
    def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                        include_genes=None, exclude_genes=None,
                        include_cells=None, exclude_cells=None,
                        norm='log', min_expression=1, thresh=0.01,
                        filter_genes=True):
        """Log-normalizes and filters the expression data.

        Parameters
        ----------
        div : float, optional, default 1
            The factor by which the gene expression will be divided prior to
            log normalization.

        downsample : float, optional, default 0
            The factor by which to randomly downsample the data. If 0, the
            data will not be downsampled.

        sum_norm : str or float, optional, default None
            If a float, the total number of transcripts in each cell will be
            normalized to this value prior to normalization and filtering.
            Otherwise, nothing happens. If 'cell_median', each cell is
            normalized to have the median total read count per cell. If
            'gene_median', each gene is normalized to have the median total
            read count per gene.

        norm : str, optional, default 'log'
            If 'log', log-normalizes the expression data. If 'ftt', applies the
            Freeman-Tukey variance-stabilization transformation. If
            'multinomial', applies the Pearson-residual transformation (this is
            experimental and should only be used for raw, un-normalized UMI
            datasets). If None, the data is not normalized.

        include_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to keep.
            All other genes will be filtered out. Gene names are case-
            sensitive.

        exclude_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to
            exclude. These genes will be filtered out. Gene names are case-
            sensitive.

        include_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to keep.
            All other cells will be filtered out. Cell names are
            case-sensitive.

        exclude_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to exclude.
            These cells will be filtered out. Cell names are
            case-sensitive.

        min_expression : float, optional, default 1
            The threshold above which a gene is considered
            expressed. Gene expression values less than 'min_expression' are
            set to zero.

        thresh : float, optional, default 0.01
            Keep genes expressed in greater than 'thresh'*100 % of cells and
            less than (1-'thresh')*100 % of cells, where a gene is considered
            expressed if its expression value exceeds 'min_expression'.

        filter_genes : bool, optional, default True
            Setting this to False turns off filtering operations aside from
            removing genes with zero expression across all cells. Genes passed
            in exclude_genes or not passed in include_genes will still be
            filtered.

        """
        # load data
        try:
            D= self.adata_raw.X
            self.adata = self.adata_raw.copy()
        except AttributeError:
            print('No data loaded')
        # filter cells
        # Start from all cells and intersect with include/exclude/downsample
        # selections; the result becomes a boolean mask over 'adata_raw'.
        cell_names = np.array(list(self.adata_raw.obs_names))
        idx_cells = np.arange(D.shape[0])
        if(include_cells is not None):
            include_cells = np.array(list(include_cells))
            idx2 = np.where(np.in1d(cell_names, include_cells))[0]
            idx_cells = np.array(list(set(idx2) & set(idx_cells)))
        if(exclude_cells is not None):
            exclude_cells = np.array(list(exclude_cells))
            idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                    invert=True))[0]
            idx_cells = np.array(list(set(idx4) & set(idx_cells)))
        if downsample > 0:
            numcells = int(D.shape[0] / downsample)
            rand_ind = np.random.choice(np.arange(D.shape[0]),
                                        size=numcells, replace=False)
            idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
        else:
            numcells = D.shape[0]
        mask_cells = np.zeros(D.shape[0], dtype='bool')
        mask_cells[idx_cells] = True
        self.adata = self.adata_raw[mask_cells,:].copy()
        D = self.adata.X
        if isinstance(D,np.ndarray):
            D=sp.csr_matrix(D,dtype='float32')
        else:
            D=D.astype('float32')
            D.sort_indices()
        if(D.getformat() == 'csc'):
            D=D.tocsr();
        # sum-normalize
        # (skipped when norm == 'multinomial'; that branch normalizes itself)
        if (sum_norm == 'cell_median' and norm != 'multinomial'):
            s = D.sum(1).A.flatten()
            sum_norm = np.median(s)
            D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
        elif (sum_norm == 'gene_median' and norm != 'multinomial'):
            s = D.sum(0).A.flatten()
            sum_norm = np.median(s)
            s[s==0]=1
            D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
        elif sum_norm is not None and norm != 'multinomial':
            D = D.multiply(1 / D.sum(1).A.flatten()[:,
                           None] * sum_norm).tocsr()
        # normalize
        self.adata.X = D
        if norm is None:
            D.data[:] = (D.data / div)
        elif(norm.lower() == 'log'):
            D.data[:] = np.log2(D.data / div + 1)
        elif(norm.lower() == 'ftt'):
            # Freeman-Tukey transform: sqrt(x) + sqrt(x + 1).
            D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
        elif norm.lower() == 'multinomial':
            # Pearson residuals under a multinomial model: mu_ij = n_i * p_j.
            ni = D.sum(1).A.flatten() #cells
            pj = (D.sum(0) / D.sum()).A.flatten() #genes
            col = D.indices
            row=[]
            for i in range(D.shape[0]):
                row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
            row = np.concatenate(row).astype('int32')
            mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
            mu2 = mu.copy()
            mu2.data[:]=mu2.data**2
            mu2 = mu2.multiply(1/ni[:,None])
            mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
            self.adata.X = mu
            if sum_norm is None:
                sum_norm = np.median(ni)
            # D is still log-normalized for use as the display layer below.
            D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
            D.data[:] = np.log2(D.data / div + 1)
        else:
            D.data[:] = (D.data / div)
        # zero-out low-expressed genes
        idx = np.where(D.data <= min_expression)[0]
        D.data[idx] = 0
        # filter genes
        gene_names = np.array(list(self.adata.var_names))
        idx_genes = np.arange(D.shape[1])
        if(include_genes is not None):
            include_genes = np.array(list(include_genes))
            idx = np.where(np.in1d(gene_names, include_genes))[0]
            idx_genes = np.array(list(set(idx) & set(idx_genes)))
        if(exclude_genes is not None):
            exclude_genes = np.array(list(exclude_genes))
            idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                    invert=True))[0]
            idx_genes = np.array(list(set(idx3) & set(idx_genes)))
        if(filter_genes):
            # Count in how many cells each gene is expressed and keep genes
            # expressed in (thresh, 1 - thresh] of cells.
            a, ct = np.unique(D.indices, return_counts=True)
            c = np.zeros(D.shape[1])
            c[a] = ct
            keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                           c / D.shape[0] <= 1 - thresh))[0]
            idx_genes = np.array(list(set(keep) & set(idx_genes)))
        # Zero out filtered genes rather than dropping columns, so matrix
        # dimensions stay aligned with 'adata_raw'.
        mask_genes = np.zeros(D.shape[1], dtype='bool')
        mask_genes[idx_genes] = True

        self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
        self.adata.X.eliminate_zeros()
        self.adata.var['mask_genes']=mask_genes

        if norm == 'multinomial':
            self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
            self.adata.layers['X_disp'].eliminate_zeros()
        else:
            self.adata.layers['X_disp'] = self.adata.X
    def load_data(self, filename, transpose=True,
                  save_sparse_file='h5ad', sep=',', **kwargs):
        """Loads the specified data file. The file can be a table of
        read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
        as columns by default. The file can also be a pickle file (output from
        'save_sparse_data') or an h5ad file (output from 'save_anndata').

        Parameters
        ----------
        filename - string
            The path to the tabular raw expression counts file.

        sep - string, optional, default ','
            The delimiter used to read the input data table. By default
            assumes the input table is delimited by commas.

        save_sparse_file - str, optional, default 'h5ad'
            If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
            (the native AnnData file format) to the same folder as the original
            data for faster loading in the future. If 'p', pickles the sparse
            data structure, cell names, and gene names in the same folder as
            the original data for faster loading in the future.

        transpose - bool, optional, default True
            By default, assumes file is (genes x cells). Set this to False if
            the file has dimensions (cells x genes).

        """
        if filename.split('.')[-1] == 'p':
            # Pickled (data, cell IDs, gene IDs) as written by
            # 'save_sparse_data'; already fast to load, so don't re-save.
            raw_data, all_cell_names, all_gene_names = (
                pickle.load(open(filename, 'rb')))

            if(transpose):
                raw_data = raw_data.T
                if raw_data.getformat()=='csc':
                    print("Converting sparse matrix to csr format...")
                    raw_data=raw_data.tocsr()

            save_sparse_file = None
        elif filename.split('.')[-1] != 'h5ad':
            # Plain-text counts table (csv/txt/etc.).
            df = pd.read_csv(filename, sep=sep, index_col=0)
            if(transpose):
                dataset = df.T
            else:
                dataset = df

            raw_data = sp.csr_matrix(dataset.values)
            all_cell_names = np.array(list(dataset.index.values))
            all_gene_names = np.array(list(dataset.columns.values))

        if filename.split('.')[-1] != 'h5ad':
            self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                     var={'var_names': all_gene_names})

            # De-duplicate IDs so name-based indexing stays unambiguous.
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()

            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = raw_data

        else:
            # Native AnnData file; no re-saving needed.
            self.adata_raw = anndata.read_h5ad(filename, **kwargs)
            self.adata = self.adata_raw.copy()
            if 'X_disp' not in list(self.adata.layers.keys()):
                self.adata.layers['X_disp'] = self.adata.X
            save_sparse_file = None

        # Optionally cache a fast-loading copy next to the original file.
        if(save_sparse_file == 'p'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_sparse_data(path + new_sparse_file + '_sparse.p')
        elif(save_sparse_file == 'h5ad'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
    def load_annotations(self, aname, sep=','):
        """Loads cell annotations.

        Loads the cell annotations specified by the 'aname' path.

        Parameters
        ----------
        aname - string
            The path to the annotations file. First column should be cell IDs
            and second column should be the desired annotations.

        sep - string, optional, default ','
            The delimiter used when re-reading the annotations file.
        """
        ann = pd.read_csv(aname)

        cell_names = np.array(list(self.adata.obs_names))
        all_cell_names = np.array(list(self.adata_raw.obs_names))

        # Sniff the file layout: re-read with/without an index column or a
        # header row until the row count matches the number of raw cells.
        if(ann.shape[1] > 1):
            ann = pd.read_csv(aname, index_col=0, sep=sep)
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
        else:
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, header=None, sep=sep)

        # Force string cell IDs so they match the AnnData obs names.
        ann.index = np.array(list(ann.index.astype('<U100')))
        # ann1: annotations reordered to match the filtered cells in 'adata';
        # ann2: the full annotation vector for 'adata_raw'.
        ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
        ann2 = np.array(list(ann.values.flatten()))
        self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
        self.adata.obs['annotations'] = pd.Categorical(ann1)
def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
"""Computes the spatial dispersion factors for each gene.
Parameters
----------
nnm - scipy.sparse, float
Square cell-to-cell nearest-neighbor matrix.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This ensures
that outlier genes do not significantly skew the weight
distribution.
Returns:
-------
indices - ndarray, int
The indices corresponding to the gene weights sorted in decreasing
order.
weights - ndarray, float
The vector of gene weights.
"""
self.knn_avg(nnm)
D_avg = self.adata.layers['X_knn_avg']
mu, var = sf.mean_variance_axis(D_avg, axis=0)
dispersions = np.zeros(var.size)
dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
self.adata.var['spatial_dispersions'] = dispersions.copy()
ma = np.sort(dispersions)[-num_norm_avg:].mean()
dispersions[dispersions >= ma] = ma
weights = ((dispersions / dispersions.max())**0.5).flatten()
self.adata.var['weights'] = weights
return weights
def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
"""Computes the contribution of the gene IDs in 'genes' to each
principal component (PC) of the filtered expression data as the mean of
the absolute value of the corresponding gene loadings. High values
correspond to PCs that are highly correlated with the features in
'genes'. These PCs can then be regressed out of the data using
'regress_genes'.
Parameters
----------
genes - numpy.array or list
Genes for which contribution to each PC will be calculated.
npcs - int, optional, default None
How many PCs to calculate when computing PCA of the filtered and
log-transformed expression data. If None, calculate all PCs.
plot - bool, optional, default False
If True, plot the scores reflecting how correlated each PC is with
genes of interest. Otherwise, do not plot anything.
Returns:
-------
x - numpy.array
Scores reflecting how correlated each PC is with the genes of
interest (ordered by decreasing eigenvalues).
"""
from sklearn.decomposition import PCA
if npcs is None:
npcs = self.adata.X.shape[0]
pca = PCA(n_components=npcs)
pc = pca.fit_transform(self.adata.X.toarray())
self.regression_pca = pca
self.regression_pcs = pc
gene_names = np.array(list(self.adata.var_names))
if(genes is not None):
idx = np.where(np.in1d(gene_names, genes))[0]
sx = pca.components_[:, idx]
x = np.abs(sx).mean(1)
if plot:
plt.figure()
plt.plot(x)
return x
else:
return
def regress_genes(self, PCs):
"""Regress out the principal components in 'PCs' from the filtered
expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
been previously called.
Parameters
----------
PCs - int, numpy.array, list
The principal components to regress out of the expression data.
"""
ind = [PCs]
ind = np.array(ind).flatten()
try:
y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
self.regression_pca.components_[ind, :] * self.adata.var[
'weights'].values)
except BaseException:
y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
self.regression_pca.components_[ind, :])
self.adata.X = sp.csr_matrix(y)
    def run(self,
            max_iter=10,
            verbose=True,
            projection='umap',
            stopping_condition=5e-3,
            num_norm_avg=50,
            k=20,
            distance='correlation',
            preprocessing='Normalizer',
            proj_kwargs={}):
        """Runs the Self-Assembling Manifold algorithm.

        Parameters
        ----------
        k - int, optional, default 20
            The number of nearest neighbors to identify for each cell.

        distance : string, optional, default 'correlation'
            The distance metric to use when constructing cell distance
            matrices. Can be any of the distance metrics supported by
            sklearn's 'pdist'.

        max_iter - int, optional, default 10
            The maximum number of iterations SAM will run.

        stopping_condition - float, optional, default 5e-3
            The stopping condition threshold for the RMSE between gene weights
            in adjacent iterations.

        verbose - bool, optional, default True
            If True, the iteration number and error between gene weights in
            adjacent iterations will be displayed.

        projection - str, optional, default 'umap'
            If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
            embedding. Otherwise, no embedding will be generated.

        preprocessing - str, optional, default 'Normalizer'
            If 'Normalizer', use sklearn.preprocessing.Normalizer, which
            normalizes expression data prior to PCA such that each cell has
            unit L2 norm. If 'StandardScaler', use
            sklearn.preprocessing.StandardScaler, which normalizes expression
            data prior to PCA such that each gene has zero mean and unit
            variance. Otherwise, do not normalize the expression data. We
            recommend using 'StandardScaler' for large datasets and
            'Normalizer' otherwise.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This prevents
            genes with large spatial dispersions from skewing the distribution
            of weights.

        proj_kwargs - dict, optional, default {}
            A dictionary of keyword arguments to pass to the projection
            functions.
        """
        self.distance = distance
        D = self.adata.X
        self.k = k
        # Clamp k to [5, 100] and below the number of cells.
        if(self.k < 5):
            self.k = 5
        elif(self.k > 100):
            self.k = 100

        if(self.k > D.shape[0] - 1):
            self.k = D.shape[0] - 2

        numcells = D.shape[0]

        # Use fewer genes and PCs for larger datasets to bound memory/time.
        n_genes = 8000
        if numcells > 3000 and n_genes > 3000:
            n_genes = 3000
        elif numcells > 2000 and n_genes > 4500:
            n_genes = 4500
        elif numcells > 1000 and n_genes > 6000:
            n_genes = 6000
        elif n_genes > 8000:
            n_genes = 8000

        npcs = None
        if npcs is None and numcells > 3000:
            npcs = 150
        elif npcs is None and numcells > 2000:
            npcs = 250
        elif npcs is None and numcells > 1000:
            npcs = 350
        elif npcs is None:
            npcs = 500

        tinit = time.time()

        # Seed with a random k-NN graph: each cell is linked to itself plus
        # k-1 random cells. This bootstraps the first weight computation.
        edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        nums = np.arange(edm.shape[1])
        RINDS = np.random.randint(
            0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                           (self.k - 1)))
        RINDS = np.hstack((nums[:, None], RINDS))

        edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                    (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
        edm = edm.tocsr()

        print('RUNNING SAM')

        W = self.dispersion_ranking_NN(
            edm, num_norm_avg=1)

        old = np.zeros(W.size)
        new = W

        i = 0
        err = ((new - old)**2).mean()**0.5

        if max_iter < 5:
            max_iter = 5

        nnas = num_norm_avg
        # Keep the graph and weight history of every iteration.
        self.Ns=[edm]
        self.Ws = [W]
        # Iterate: reweight -> project -> rebuild graph -> recompute weights,
        # until the weight RMSE between iterations falls below the threshold.
        while (i < max_iter and err > stopping_condition):
            conv = err
            if(verbose):
                print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))

            i += 1
            old = new

            W, wPCA_data, EDM, = self.calculate_nnm(
                D, W, n_genes, preprocessing, npcs, numcells, nnas)
            new = W
            err = ((new - old)**2).mean()**0.5

            self.Ns.append(EDM)
            self.Ws.append(W)

        # One final pass with the converged weights.
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        self.Ns.append(EDM)

        all_gene_names = np.array(list(self.adata.var_names))
        indices = np.argsort(-W)
        ranked_genes = all_gene_names[indices]

        self.corr_bin_genes(number_of_features=1000)

        self.adata.uns['ranked_genes'] = ranked_genes

        self.adata.obsm['X_pca'] = wPCA_data

        self.adata.uns['neighbors'] = {}
        self.adata.uns['neighbors']['connectivities'] = EDM

        if(projection == 'tsne'):
            print('Computing the t-SNE embedding...')
            self.run_tsne(**proj_kwargs)
        elif(projection == 'umap'):
            print('Computing the UMAP embedding...')
            self.run_umap(**proj_kwargs)
        elif(projection == 'diff_umap'):
            print('Computing the diffusion UMAP embedding...')
            self.run_diff_umap(**proj_kwargs)

        elapsed = time.time() - tinit

        if verbose:
            print('Elapsed time: ' + str(elapsed) + ' seconds')
    def calculate_nnm(
            self,
            D,
            W,
            n_genes,
            preprocessing,
            npcs,
            numcells,
            num_norm_avg):
        """One SAM iteration: weight the expression data by 'W', project it
        with weighted PCA, build the nearest-neighbor graph, and recompute
        the gene weights from the new graph.

        Returns (weights, weighted PCA coordinates, neighbor adjacency).
        """
        if(n_genes is None):
            gkeep = np.arange(W.size)
        else:
            # Keep only the top 'n_genes' weighted genes (indices re-sorted
            # so column order is preserved).
            gkeep = np.sort(np.argsort(-W)[:n_genes])

        if preprocessing == 'Normalizer':
            Ds = D[:, gkeep].toarray()
            Ds = Normalizer().fit_transform(Ds)

        elif preprocessing == 'StandardScaler':
            Ds = D[:, gkeep].toarray()
            Ds = StandardScaler(with_mean=True).fit_transform(Ds)
            # Clip extreme z-scores so outlier cells do not dominate the PCA.
            Ds[Ds > 5] = 5
            Ds[Ds < -5] = -5

        else:
            Ds = D[:, gkeep].toarray()

        # Rescale each gene by its SAM weight before projecting.
        D_sub = Ds * (W[gkeep])

        if numcells > 500:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='auto')
        else:
            # Small datasets use the full (exact) SVD solver.
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='full')

        if self.distance == 'euclidean':
            # Unit-normalize the projected cells when using euclidean distance.
            g_weighted = Normalizer().fit_transform(g_weighted)

        self.adata.uns['pca_obj'] = pca
        EDM = self.calc_nnm(g_weighted)

        W = self.dispersion_ranking_NN(
            EDM, num_norm_avg=num_norm_avg)

        self.adata.uns['X_processed'] = D_sub

        return W, g_weighted, EDM
def calc_nnm(self,g_weighted):
numcells=g_weighted.shape[0]
if g_weighted.shape[0] > 8000:
nnm, dists = ut.nearest_neighbors(
g_weighted, n_neighbors=self.k, metric=self.distance)
EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
EDM[np.tile(np.arange(nnm.shape[0])[:, None],
(1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
EDM = EDM.tocsr()
else:
dist = ut.compute_distances(g_weighted, self.distance)
nnm = ut.dist_to_nn(dist, self.k)
EDM = sp.csr_matrix(nnm)
return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
"""Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
geneID_groups = self.adata.uns['gene_groups']
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs)
def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
"""A (hacky) method for binning groups of genes correlated along the
SAM manifold.
Parameters
----------
number_of_features - int, optional, default None
The number of genes to bin. Capped at 5000 due to memory
considerations.
input_gene - str, optional, default None
If not None, use this gene as the first seed when growing the
correlation bins.
"""
weights = self.adata.var['spatial_dispersions'].values
all_gene_names = np.array(list(self.adata.var_names))
D_avg = self.adata.layers['X_knn_avg']
idx2 = np.argsort(-weights)[:weights[weights > 0].size]
if(number_of_features is None or number_of_features > idx2.size):
number_of_features = idx2.size
if number_of_features > 1000:
number_of_features = 1000
if(input_gene is not None):
input_gene = np.where(all_gene_names == input_gene)[0]
if(input_gene.size == 0):
print(
"Gene note found in the filtered dataset. Note "
"that genes are case sensitive.")
return
seeds = [np.array([input_gene])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(all_gene_names[seeds[i]])
return geneID_groups[0]
else:
seeds = [np.array([idx2[0]])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(
all_gene_names[seeds[i]])
self.adata.uns['gene_groups'] = geneID_groups
return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
"""Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' is set to 'precomputed' by default,
implying that this function expects a distance matrix by default.
"""
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d
def run_umap(self, X=None, metric=None, **kwargs):
"""Wrapper for umap-learn.
See https://github.com/lmcinnes/umap sklearn for the documentation
and source code.
"""
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d
def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
method='gauss', **kwargs):
"""
Experimental -- running UMAP on the diffusion components
"""
import scanpy.api as sc
sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
metric=self.distance,method=method)
sc.tl.diffmap(self.adata, n_comps=n_comps)
sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
metric='euclidean',method=method)
if 'X_umap' in self.adata.obsm.keys():
self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
sc.tl.umap(self.adata,min_dist=0.1,copy=False)
def knn_avg(self, nnm=None):
if (nnm is None):
nnm = self.adata.uns['neighbors']['connectivities']
D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])
self.adata.layers['X_knn_avg'] = D_avg
    def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
                edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
        """Display a scatter plot.

        Displays a scatter plot using the SAM projection or another input
        projection with or without annotations.

        Parameters
        ----------
        projection - ndarray of floats, optional, default None
            An N x 2 matrix, where N is the number of data points. If None,
            use an existing SAM projection (default t-SNE). Can take on values
            'umap' or 'tsne' to specify either the SAM UMAP embedding or
            SAM t-SNE embedding.

        c - ndarray or str, optional, default None
            Colors for each cell in the scatter plot. Can be a vector of
            floats or strings for cell annotations. Can also be a key
            for sam.adata.obs (i.e. 'louvain_clusters').

        axes - matplotlib axis, optional, default None
            Plot output to the specified, existing axes. If None, create new
            figure window.

        cmap - string, optional, default 'rainbow'
            The colormap to use for the input color values.

        colorbar - bool, optional default True
            If True, display a colorbar indicating which values / annotations
            correspond to which color in the scatter plot.

        Keyword arguments -
            All other keyword arguments that can be passed into
            matplotlib.pyplot.scatter can be used.
        """
        if (not PLOTTING):
            print("matplotlib not installed!")
        else:
            # Resolve the 2D coordinates: named obsm key, default embedding
            # (UMAP then t-SNE), or an explicit coordinate array.
            if(isinstance(projection, str)):
                try:
                    dt = self.adata.obsm[projection]
                except KeyError:
                    print('Please create a projection first using run_umap or'
                          'run_tsne')
            elif(projection is None):
                try:
                    dt = self.adata.obsm['X_umap']
                except KeyError:
                    try:
                        dt = self.adata.obsm['X_tsne']
                    except KeyError:
                        print("Please create either a t-SNE or UMAP projection"
                              "first.")
                        return
            else:
                dt = projection

            if(axes is None):
                plt.figure()
                axes = plt.gca()

            if(c is None):
                # NOTE(review): this branch draws via plt.scatter (current
                # figure) rather than the resolved 'axes' -- confirm intended.
                plt.scatter(dt[:, 0], dt[:, 1], s=s,
                            linewidth=linewidth, edgecolor=edgecolor, **kwargs)
            else:

                if isinstance(c, str):
                    # Interpret a string as an adata.obs column name when one
                    # exists; otherwise leave 'c' unchanged.
                    try:
                        c = self.adata.obs[c].get_values()
                    except KeyError:
                        0  # do nothing

                if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
                        (isinstance(c, np.ndarray) or isinstance(c, list))):
                    # Categorical labels: map to integers for coloring and
                    # label the colorbar ticks with the original strings.
                    i = ut.convert_annotations(c)
                    ui, ai = np.unique(i, return_index=True)

                    cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)

                    if(colorbar):
                        cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                        cbar.ax.set_yticklabels(c[ai])
                else:
                    if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                        colorbar = False
                    i = c

                    cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)

                    if(colorbar):
                        plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name)
    def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
        """Cluster cells with DBSCAN, then assign each noise point to the
        cluster that dominates among its k nearest clustered neighbors.

        Parameters
        ----------
        X - numpy.ndarray, optional, default None
            The coordinates to cluster. If None, the stored UMAP embedding
            is used and the labels are saved to
            'adata.obs["density_clusters"]'; otherwise the labels are
            returned.

        eps - float, optional, default 1
            The DBSCAN neighborhood radius.

        metric - str, optional, default 'euclidean'
            The distance metric passed to DBSCAN. Remaining keyword
            arguments are forwarded to DBSCAN as well.
        """
        from sklearn.cluster import DBSCAN
        if X is None:
            X = self.adata.obsm['X_umap']
            save = True
        else:
            save = False

        cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)

        # DBSCAN labels noise points -1. Reassign them (if any) by majority
        # vote over each noise point's k nearest clustered neighbors.
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            # Distances from clustered points (rows) to noise points (cols).
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            # Per-noise-point counts of neighbors belonging to each cluster.
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

            cl[idx1] = np.argmax(nnmc, axis=1)

        if save:
            self.adata.obs['density_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def louvain_clustering(self, X=None, res=1, method='modularity'):
        """Runs Louvain clustering using the vtraag implementation. Assumes
        that 'louvain' optional dependency is installed.

        Parameters
        ----------
        X - scipy.sparse matrix, optional, default None
            Adjacency/connectivity matrix to cluster. If None, the stored
            SAM nearest-neighbor connectivities are used and the labels are
            saved to ``adata.obs['louvain_clusters']``; otherwise the labels
            are returned.
        res - float, optional, default 1
            The resolution parameter which tunes the number of clusters Louvain
            finds.

        method - str, optional, default 'modularity'
            Can be 'modularity' or 'significance', which are two different
            optimizing functions in the Louvain algorithm.

        """
        if X is None:
            X = self.adata.uns['neighbors']['connectivities']
            save = True
        else:
            if not sp.isspmatrix_csr(X):
                X = sp.csr_matrix(X)
            save = False

        import igraph as ig
        import louvain

        # Symmetrize/sharpen the graph: keep the top-k entries of X.X^T.
        adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
        sources, targets = adjacency.nonzero()
        weights = adjacency[sources, targets]
        if isinstance(weights, np.matrix):
            weights = weights.A1
        g = ig.Graph(directed=True)
        g.add_vertices(adjacency.shape[0])
        g.add_edges(list(zip(sources, targets)))
        try:
            g.es['weight'] = weights
        except BaseException:
            # Best effort: fall back to an unweighted graph if igraph
            # rejects the weight vector.
            pass

        if method == 'significance':
            cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
        else:
            cl = louvain.find_partition(
                g,
                louvain.RBConfigurationVertexPartition,
                resolution_parameter=res)

        if save:
            self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
        else:
            return np.array(cl.membership)
    def kmeans_clustering(self, numc, X=None, npcs=15):
        """Performs k-means clustering.

        Parameters
        ----------
        numc - int
            Number of clusters

        X - numpy.ndarray, optional, default None
            Data to cluster. If None, the top ``npcs`` principal components
            of the processed expression data are used and the labels are
            saved to ``adata.obs['kmeans_clusters']``; otherwise the labels
            are returned.

        npcs - int, optional, default 15
            Number of principal components to use as input for k-means
            clustering.

        """
        from sklearn.cluster import KMeans
        if X is None:
            # Project the centered processed data onto the stored PCA basis.
            D_sub = self.adata.uns['X_processed']
            X = (D_sub - D_sub.mean(0)).dot(
                self.adata.uns['pca_obj'].components_[:npcs, :].T)
            save = True
        else:
            save = False

        # Rows are L2-normalized before clustering.
        cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))

        if save:
            self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def leiden_clustering(self, X=None, res=1):
        """Runs Leiden clustering via scanpy.

        Parameters
        ----------
        X - scipy.sparse matrix, optional, default None
            If None, scanpy clusters using the neighbors stored in
            ``self.adata`` and saves integer labels to
            ``adata.obs['leiden_clusters']``. Otherwise X is passed as the
            adjacency matrix and labels are saved to
            ``adata.obs['leiden_clusters_X']``.
        res - float, optional, default 1
            Leiden resolution parameter.

        NOTE(review): ``scanpy.api`` and pandas ``.get_values()`` are both
        deprecated in current releases — confirm against the pinned
        scanpy/pandas versions.
        """
        import scanpy.api as sc
        if X is None:
            sc.tl.leiden(self.adata, resolution=res,
                         key_added='leiden_clusters')
            # Re-store the categorical labels as integers.
            self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
                'leiden_clusters'].get_values().astype('int'))
        else:
            sc.tl.leiden(self.adata, resolution=res, adjacency=X,
                         key_added='leiden_clusters_X')
            self.adata.obs['leiden_clusters_X'] = pd.Categorical(self.adata.obs[
                'leiden_clusters_X'].get_values().astype('int'))
    def hdbknn_clustering(self, X=None, k=None, **kwargs):
        """Cluster cells with HDBSCAN, then assign noise points to clusters.

        Parameters
        ----------
        X - numpy.ndarray, optional, default None
            Data to cluster. If None, the top 15 PCs of the processed data
            (row-normalized) are used and the labels are saved to
            ``adata.obs['hdbknn_clusters']``; otherwise the labels are
            returned.
        k - int, optional, default None
            Number of nearest neighbors used when reassigning noise points.
            Defaults to 20 if not given.
        **kwargs -
            Additional keyword arguments forwarded to ``hdbscan.HDBSCAN``.
        """
        import hdbscan
        if X is None:
            #X = self.adata.obsm['X_pca']
            D = self.adata.uns['X_processed']
            X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
            X = Normalizer().fit_transform(X)
            save = True
        else:
            save = False

        if k is None:
            k = 20#self.k

        hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
        cl = hdb.fit_predict(X)

        # HDBSCAN labels noise points -1; assign each noise point to the
        # cluster that dominates its k nearest clustered neighbors (same
        # scheme as density_clustering).
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

            cl[idx1] = np.argmax(nnmc, axis=1)

        if save:
            self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
        else:
            return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
"""
Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers
    def identify_marker_genes_ratio(self, labels=None):
        """
        Ranks marker genes for each cluster using a SAM-weighted
        expression-ratio approach (works quite well).

        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.

        Returns
        -------
        markers - dict
            Maps each cluster ID to an array of gene names ranked by the
            weighted expression-ratio score. Also stored in
            ``adata.uns['marker_genes_ratio']``.
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                # Pick the first adata.obs column whose name contains
                # '_clusters'.
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels

        all_gene_names = np.array(list(self.adata.var_names))

        markers={}
        # s: total expression per gene over all cells.
        s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()

        lblsu=np.unique(lbls)
        for i in lblsu:
            # d: total expression per gene within cluster i.
            d = np.array(self.adata.layers['X_disp']
                         [lbls == i, :].sum(0)).flatten()
            # Score = (within-cluster sum)^2 / total sum, scaled by the SAM
            # gene weight; genes with zero total expression score 0.
            rat = np.zeros(d.size)
            rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
                self.adata.var['weights'].values[s > 0]
            x = np.argsort(-rat)
            markers[i] = all_gene_names[x[:]]

        self.adata.uns['marker_genes_ratio'] = markers

        return markers
    def identify_marker_genes_corr(self, labels=None, n_genes=4000):
        """
        Ranking marker genes based on their respective magnitudes in the
        correlation dot products with cluster-specific reference expression
        profiles.

        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.

        n_genes - int, optional, default 4000
            By default, computes correlations on the top 4000 SAM-weighted genes.

        Returns
        -------
        markers - dict
            Maps each cluster ID to an array of gene names ranked by
            correlation with the cluster's mean profile. Also stored in
            ``adata.uns['marker_genes_corr']``.
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels

        # Standardize the top-n_genes columns and re-apply the SAM weights.
        w=self.adata.var['weights'].values
        s = StandardScaler()
        idxg = np.argsort(-w)[:n_genes]
        y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]

        all_gene_names = np.array(list(self.adata.var_names))[idxg]
        markers = {}
        lblsu=np.unique(lbls)
        for i in lblsu:
            Gcells = np.array(list(self.adata.obs_names[lbls==i]))
            z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
            # z-score each cell's profile, then correlate against the
            # cluster's standardized mean profile.
            m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]

            ref = z1.mean(0)
            ref = (ref-ref.mean())/ref.std()
            g2 = (m1*ref).mean(0)
            markers[i] = all_gene_names[np.argsort(-g2)]

        self.adata.uns['marker_genes_corr'] = markers
        return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.save_sparse_data | python | def save_sparse_data(self, fname):
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb')) | Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L429-L446 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
    def __init__(self, counts=None, annotations=None):
        """Build the raw and working AnnData objects from the input counts.

        Accepts a (data, gene IDs, cell IDs) tuple/list, a cells x genes
        DataFrame, an AnnData object, or nothing (data loaded later via a
        load function). See the class docstring for details.
        """
        if isinstance(counts, tuple) or isinstance(counts, list):
            raw_data, all_gene_names, all_cell_names = counts
            # Densities are stored sparse internally.
            if isinstance(raw_data, np.ndarray):
                raw_data = sp.csr_matrix(raw_data)
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})

        elif isinstance(counts, pd.DataFrame):
            raw_data = sp.csr_matrix(counts.values)
            all_gene_names = np.array(list(counts.columns.values))
            all_cell_names = np.array(list(counts.index.values))
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})

        elif isinstance(counts, AnnData):
            all_cell_names=np.array(list(counts.obs_names))
            all_gene_names=np.array(list(counts.var_names))
            self.adata_raw = counts
        elif counts is not None:
            raise Exception(
                "\'counts\' must be either a tuple/list of "
                "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
                "cells x genes")

        if(annotations is not None):
            annotations = np.array(list(annotations))
            if counts is not None:
                self.adata_raw.obs['annotations'] = pd.Categorical(annotations)

        if(counts is not None):
            # Deduplicate IDs before copying into the working object.
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()

            self.adata = self.adata_raw.copy()
            # 'X_disp' is the layer used for display/marker calculations.
            self.adata.layers['X_disp'] = self.adata.X
    def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                        include_genes=None, exclude_genes=None,
                        include_cells=None, exclude_cells=None,
                        norm='log', min_expression=1, thresh=0.01,
                        filter_genes=True):
        """Log-normalizes and filters the expression data.

        Parameters
        ----------

        div : float, optional, default 1
            The factor by which the gene expression will be divided prior to
            log normalization.

        downsample : float, optional, default 0
            The factor by which to randomly downsample the data. If 0, the
            data will not be downsampled.

        sum_norm : str or float, optional, default None
            If a float, the total number of transcripts in each cell will be
            normalized to this value prior to normalization and filtering.
            Otherwise, nothing happens. If 'cell_median', each cell is
            normalized to have the median total read count per cell. If
            'gene_median', each gene is normalized to have the median total
            read count per gene.

        norm : str, optional, default 'log'
            If 'log', log-normalizes the expression data. If 'ftt', applies the
            Freeman-Tukey variance-stabilization transformation. If
            'multinomial', applies the Pearson-residual transformation (this is
            experimental and should only be used for raw, un-normalized UMI
            datasets). If None, the data is not normalized.

        include_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to keep.
            All other genes will be filtered out. Gene names are case-
            sensitive.

        exclude_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to
            exclude. These genes will be filtered out. Gene names are case-
            sensitive.

        include_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to keep.
            All other cells will be filtered out. Cell names are
            case-sensitive.

        exclude_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to exclude.
            These cells will be filtered out. Cell names are
            case-sensitive.

        min_expression : float, optional, default 1
            The threshold above which a gene is considered
            expressed. Gene expression values less than 'min_expression' are
            set to zero.

        thresh : float, optional, default 0.01
            Keep genes expressed in greater than 'thresh'*100 % of cells and
            less than (1-'thresh')*100 % of cells, where a gene is considered
            expressed if its expression value exceeds 'min_expression'.

        filter_genes : bool, optional, default True
            Setting this to False turns off filtering operations aside from
            removing genes with zero expression across all cells. Genes passed
            in exclude_genes or not passed in include_genes will still be
            filtered.

        """
        # load data
        try:
            D= self.adata_raw.X
            self.adata = self.adata_raw.copy()
        except AttributeError:
            print('No data loaded')

        # filter cells: start from all cell indices and intersect with the
        # include/exclude/downsample selections.
        cell_names = np.array(list(self.adata_raw.obs_names))
        idx_cells = np.arange(D.shape[0])
        if(include_cells is not None):
            include_cells = np.array(list(include_cells))
            idx2 = np.where(np.in1d(cell_names, include_cells))[0]
            idx_cells = np.array(list(set(idx2) & set(idx_cells)))

        if(exclude_cells is not None):
            exclude_cells = np.array(list(exclude_cells))
            idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                    invert=True))[0]
            idx_cells = np.array(list(set(idx4) & set(idx_cells)))

        if downsample > 0:
            numcells = int(D.shape[0] / downsample)
            rand_ind = np.random.choice(np.arange(D.shape[0]),
                                        size=numcells, replace=False)
            idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
        else:
            numcells = D.shape[0]

        mask_cells = np.zeros(D.shape[0], dtype='bool')
        mask_cells[idx_cells] = True

        self.adata = self.adata_raw[mask_cells,:].copy()
        D = self.adata.X

        # Work on a float32 CSR matrix throughout.
        if isinstance(D,np.ndarray):
            D=sp.csr_matrix(D,dtype='float32')
        else:
            D=D.astype('float32')
            D.sort_indices()

        if(D.getformat() == 'csc'):
            D=D.tocsr();

        # sum-normalize (skipped for 'multinomial', which normalizes itself)
        if (sum_norm == 'cell_median' and norm != 'multinomial'):
            s = D.sum(1).A.flatten()
            sum_norm = np.median(s)
            D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
        elif (sum_norm == 'gene_median' and norm != 'multinomial'):
            s = D.sum(0).A.flatten()
            sum_norm = np.median(s)
            s[s==0]=1
            D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
        elif sum_norm is not None and norm != 'multinomial':
            D = D.multiply(1 / D.sum(1).A.flatten()[:,
                                                    None] * sum_norm).tocsr()

        # normalize
        self.adata.X = D
        if norm is None:
            D.data[:] = (D.data / div)
        elif(norm.lower() == 'log'):
            D.data[:] = np.log2(D.data / div + 1)
        elif(norm.lower() == 'ftt'):
            # Freeman-Tukey: sqrt(x) + sqrt(x+1).
            D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
        elif norm.lower() == 'multinomial':
            # Pearson residuals: (x - mu) / sqrt(mu - mu^2/n_i), where
            # mu_ij = n_i * p_j for cell totals n_i and gene fractions p_j.
            ni = D.sum(1).A.flatten()  # cells
            pj = (D.sum(0) / D.sum()).A.flatten()  # genes
            col = D.indices
            row=[]
            for i in range(D.shape[0]):
                row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
            row = np.concatenate(row).astype('int32')
            mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
            mu2 = mu.copy()
            mu2.data[:]=mu2.data**2
            mu2 = mu2.multiply(1/ni[:,None])
            mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)

            self.adata.X = mu
            # D keeps a log-normalized copy for the display layer below.
            if sum_norm is None:
                sum_norm = np.median(ni)
            D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
            D.data[:] = np.log2(D.data / div + 1)
        else:
            D.data[:] = (D.data / div)

        # zero-out low-expressed genes
        idx = np.where(D.data <= min_expression)[0]
        D.data[idx] = 0

        # filter genes: same intersection scheme as the cell filters.
        gene_names = np.array(list(self.adata.var_names))
        idx_genes = np.arange(D.shape[1])
        if(include_genes is not None):
            include_genes = np.array(list(include_genes))
            idx = np.where(np.in1d(gene_names, include_genes))[0]
            idx_genes = np.array(list(set(idx) & set(idx_genes)))

        if(exclude_genes is not None):
            exclude_genes = np.array(list(exclude_genes))
            idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                    invert=True))[0]
            idx_genes = np.array(list(set(idx3) & set(idx_genes)))

        if(filter_genes):
            # Count, per gene, in how many cells it is expressed.
            a, ct = np.unique(D.indices, return_counts=True)
            c = np.zeros(D.shape[1])
            c[a] = ct

            keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                           c / D.shape[0] <= 1 - thresh))[0]

            idx_genes = np.array(list(set(keep) & set(idx_genes)))

        mask_genes = np.zeros(D.shape[1], dtype='bool')
        mask_genes[idx_genes] = True

        # Zero out filtered genes rather than dropping columns, so shapes
        # stay aligned with adata_raw.
        self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
        self.adata.X.eliminate_zeros()
        self.adata.var['mask_genes']=mask_genes

        if norm == 'multinomial':
            self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
            self.adata.layers['X_disp'].eliminate_zeros()
        else:
            self.adata.layers['X_disp'] = self.adata.X
    def load_data(self, filename, transpose=True,
                  save_sparse_file='h5ad', sep=',', **kwargs):
        """Loads the specified data file. The file can be a table of
        read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
        as columns by default. The file can also be a pickle file (output from
        'save_sparse_data') or an h5ad file (output from 'save_anndata').

        This function that loads the file specified by 'filename'.

        Parameters
        ----------
        filename - string
            The path to the tabular raw expression counts file.

        sep - string, optional, default ','
            The delimeter used to read the input data table. By default
            assumes the input table is delimited by commas.

        save_sparse_file - str, optional, default 'h5ad'
            If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
            (the native AnnData file format) to the same folder as the original
            data for faster loading in the future. If 'p', pickles the sparse
            data structure, cell names, and gene names in the same folder as
            the original data for faster loading in the future.

        transpose - bool, optional, default True
            By default, assumes file is (genes x cells). Set this to False if
            the file has dimensions (cells x genes).

        """
        # Dispatch on the file extension: pickle ('.p'), h5ad, or a
        # delimited text table.
        if filename.split('.')[-1] == 'p':
            raw_data, all_cell_names, all_gene_names = (
                pickle.load(open(filename, 'rb')))

            if(transpose):
                raw_data = raw_data.T
                if raw_data.getformat()=='csc':
                    print("Converting sparse matrix to csr format...")
                    raw_data=raw_data.tocsr()

            # Already a fast-loading format; do not re-save.
            save_sparse_file = None
        elif filename.split('.')[-1] != 'h5ad':
            df = pd.read_csv(filename, sep=sep, index_col=0)
            if(transpose):
                dataset = df.T
            else:
                dataset = df

            raw_data = sp.csr_matrix(dataset.values)
            all_cell_names = np.array(list(dataset.index.values))
            all_gene_names = np.array(list(dataset.columns.values))

        if filename.split('.')[-1] != 'h5ad':
            self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                     var={'var_names': all_gene_names})

            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()

            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = raw_data

        else:
            self.adata_raw = anndata.read_h5ad(filename, **kwargs)
            self.adata = self.adata_raw.copy()
            if 'X_disp' not in list(self.adata.layers.keys()):
                self.adata.layers['X_disp'] = self.adata.X
            save_sparse_file = None

        # Optionally cache a fast-loading copy next to the original file.
        if(save_sparse_file == 'p'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_sparse_data(path + new_sparse_file + '_sparse.p')
        elif(save_sparse_file == 'h5ad'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
    def load_annotations(self, aname, sep=','):
        """Loads cell annotations.

        Loads the cell annotations specified by the 'aname' path.

        Parameters
        ----------
        aname - string
            The path to the annotations file. First column should be cell IDs
            and second column should be the desired annotations.

        """
        ann = pd.read_csv(aname)

        cell_names = np.array(list(self.adata.obs_names))
        all_cell_names = np.array(list(self.adata_raw.obs_names))

        # Re-read with/without an index column and header until the row
        # count matches the number of cells in adata_raw.
        if(ann.shape[1] > 1):
            ann = pd.read_csv(aname, index_col=0, sep=sep)
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
        else:
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, header=None, sep=sep)

        # Index by cell ID (as fixed-width strings), then align to the
        # filtered (ann1) and raw (ann2) cell orderings.
        ann.index = np.array(list(ann.index.astype('<U100')))
        ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
        ann2 = np.array(list(ann.values.flatten()))
        self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
        self.adata.obs['annotations'] = pd.Categorical(ann1)
    def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
        """Computes the spatial dispersion factors for each gene.

        Parameters
        ----------
        nnm - scipy.sparse, float
            Square cell-to-cell nearest-neighbor matrix.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This ensures
            that outlier genes do not significantly skew the weight
            distribution.

        Returns:
        -------
        weights - ndarray, float
            The vector of gene weights. (Also stored in
            ``adata.var['weights']``.)
        """
        # Smooth expression over the kNN graph; result lands in
        # adata.layers['X_knn_avg'].
        self.knn_avg(nnm)

        D_avg = self.adata.layers['X_knn_avg']

        mu, var = sf.mean_variance_axis(D_avg, axis=0)

        # Fano factor (var/mean) of the smoothed expression per gene.
        dispersions = np.zeros(var.size)
        dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]

        self.adata.var['spatial_dispersions'] = dispersions.copy()

        # Clip at the mean of the top num_norm_avg dispersions so a few
        # outlier genes cannot dominate the weights.
        ma = np.sort(dispersions)[-num_norm_avg:].mean()
        dispersions[dispersions >= ma] = ma

        weights = ((dispersions / dispersions.max())**0.5).flatten()

        self.adata.var['weights'] = weights

        return weights
    def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
        """Computes the contribution of the gene IDs in 'genes' to each
        principal component (PC) of the filtered expression data as the mean of
        the absolute value of the corresponding gene loadings. High values
        correspond to PCs that are highly correlated with the features in
        'genes'. These PCs can then be regressed out of the data using
        'regress_genes'.

        Parameters
        ----------
        genes - numpy.array or list
            Genes for which contribution to each PC will be calculated.

        npcs - int, optional, default None
            How many PCs to calculate when computing PCA of the filtered and
            log-transformed expression data. If None, calculate all PCs.

        plot - bool, optional, default False
            If True, plot the scores reflecting how correlated each PC is with
            genes of interest. Otherwise, do not plot anything.

        Returns:
        -------
        x - numpy.array
            Scores reflecting how correlated each PC is with the genes of
            interest (ordered by decreasing eigenvalues). None if 'genes'
            is not provided.

        """
        from sklearn.decomposition import PCA
        if npcs is None:
            npcs = self.adata.X.shape[0]

        pca = PCA(n_components=npcs)
        pc = pca.fit_transform(self.adata.X.toarray())

        # Kept for use by regress_genes().
        self.regression_pca = pca
        self.regression_pcs = pc

        gene_names = np.array(list(self.adata.var_names))
        if(genes is not None):
            idx = np.where(np.in1d(gene_names, genes))[0]
            sx = pca.components_[:, idx]
            # Mean absolute loading of the selected genes on each PC.
            x = np.abs(sx).mean(1)

            if plot:
                plt.figure()
                plt.plot(x)

            return x
        else:
            return
    def regress_genes(self, PCs):
        """Regress out the principal components in 'PCs' from the filtered
        expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
        been previously called.

        Parameters
        ----------
        PCs - int, numpy.array, list
            The principal components to regress out of the expression data.

        """

        # Normalize the input to a flat index array (accepts a scalar too).
        ind = [PCs]
        ind = np.array(ind).flatten()
        try:
            # Weighted reconstruction; falls back to the unweighted form
            # below if gene weights are unavailable.
            # NOTE(review): the broad BaseException here silently catches
            # anything, not just a missing 'weights' column — consider
            # narrowing to (KeyError, AttributeError).
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :] * self.adata.var[
                    'weights'].values)
        except BaseException:
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :])

        self.adata.X = sp.csr_matrix(y)
    def run(self,
            max_iter=10,
            verbose=True,
            projection='umap',
            stopping_condition=5e-3,
            num_norm_avg=50,
            k=20,
            distance='correlation',
            preprocessing='Normalizer',
            proj_kwargs={}):
        """Runs the Self-Assembling Manifold algorithm.

        Parameters
        ----------
        k - int, optional, default 20
            The number of nearest neighbors to identify for each cell.

        distance : string, optional, default 'correlation'
            The distance metric to use when constructing cell distance
            matrices. Can be any of the distance metrics supported by
            sklearn's 'pdist'.

        max_iter - int, optional, default 10
            The maximum number of iterations SAM will run.

        stopping_condition - float, optional, default 5e-3
            The stopping condition threshold for the RMSE between gene weights
            in adjacent iterations.

        verbose - bool, optional, default True
            If True, the iteration number and error between gene weights in
            adjacent iterations will be displayed.

        projection - str, optional, default 'umap'
            If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
            embedding. Otherwise, no embedding will be generated.

        preprocessing - str, optional, default 'Normalizer'
            If 'Normalizer', use sklearn.preprocessing.Normalizer, which
            normalizes expression data prior to PCA such that each cell has
            unit L2 norm. If 'StandardScaler', use
            sklearn.preprocessing.StandardScaler, which normalizes expression
            data prior to PCA such that each gene has zero mean and unit
            variance. Otherwise, do not normalize the expression data. We
            recommend using 'StandardScaler' for large datasets and
            'Normalizer' otherwise.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This prevents
            genes with large spatial dispersions from skewing the distribution
            of weights.

        proj_kwargs - dict, optional, default {}
            A dictionary of keyword arguments to pass to the projection
            functions.
        """
        # Clamp k to [5, 100] and to the number of cells.
        self.distance = distance
        D = self.adata.X
        self.k = k
        if(self.k < 5):
            self.k = 5
        elif(self.k > 100):
            self.k = 100

        if(self.k > D.shape[0] - 1):
            self.k = D.shape[0] - 2

        numcells = D.shape[0]

        # Heuristics: fewer genes / PCs for larger datasets.
        n_genes = 8000
        if numcells > 3000 and n_genes > 3000:
            n_genes = 3000
        elif numcells > 2000 and n_genes > 4500:
            n_genes = 4500
        elif numcells > 1000 and n_genes > 6000:
            n_genes = 6000
        elif n_genes > 8000:
            n_genes = 8000

        npcs = None
        if npcs is None and numcells > 3000:
            npcs = 150
        elif npcs is None and numcells > 2000:
            npcs = 250
        elif npcs is None and numcells > 1000:
            npcs = 350
        elif npcs is None:
            npcs = 500

        tinit = time.time()

        # Initial neighbor graph: each cell linked to itself plus k-1
        # randomly chosen cells.
        edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        nums = np.arange(edm.shape[1])
        RINDS = np.random.randint(
            0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                           (self.k - 1)))
        RINDS = np.hstack((nums[:, None], RINDS))

        edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                    (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
        edm = edm.tocsr()

        print('RUNNING SAM')

        W = self.dispersion_ranking_NN(
            edm, num_norm_avg=1)

        old = np.zeros(W.size)
        new = W

        i = 0
        err = ((new - old)**2).mean()**0.5

        if max_iter < 5:
            max_iter = 5

        nnas = num_norm_avg
        # Keep the graph and weight history from every iteration.
        self.Ns=[edm]
        self.Ws = [W]

        # Iterate weights -> weighted PCA -> kNN graph -> weights until the
        # RMSE between successive weight vectors drops below the threshold.
        while (i < max_iter and err > stopping_condition):
            conv = err
            if(verbose):
                print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))

            i += 1
            old = new

            W, wPCA_data, EDM, = self.calculate_nnm(
                D, W, n_genes, preprocessing, npcs, numcells, nnas)
            new = W
            err = ((new - old)**2).mean()**0.5

            self.Ns.append(EDM)
            self.Ws.append(W)

        # One final pass with the converged weights.
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        self.Ns.append(EDM)

        all_gene_names = np.array(list(self.adata.var_names))
        indices = np.argsort(-W)
        ranked_genes = all_gene_names[indices]

        self.corr_bin_genes(number_of_features=1000)

        self.adata.uns['ranked_genes'] = ranked_genes

        self.adata.obsm['X_pca'] = wPCA_data
        self.adata.uns['neighbors'] = {}
        self.adata.uns['neighbors']['connectivities'] = EDM

        if(projection == 'tsne'):
            print('Computing the t-SNE embedding...')
            self.run_tsne(**proj_kwargs)
        elif(projection == 'umap'):
            print('Computing the UMAP embedding...')
            self.run_umap(**proj_kwargs)
        elif(projection == 'diff_umap'):
            print('Computing the diffusion UMAP embedding...')
            self.run_diff_umap(**proj_kwargs)

        elapsed = time.time() - tinit
        if verbose:
            print('Elapsed time: ' + str(elapsed) + ' seconds')
    def calculate_nnm(
            self,
            D,
            W,
            n_genes,
            preprocessing,
            npcs,
            numcells,
            num_norm_avg):
        """One SAM iteration: weight the data, run PCA, rebuild the kNN
        graph, and recompute gene weights.

        Returns the tuple (new weights, weighted PCA coordinates, kNN
        adjacency matrix).
        """
        # Keep only the top-n_genes weighted genes (all genes if None).
        if(n_genes is None):
            gkeep = np.arange(W.size)
        else:
            gkeep = np.sort(np.argsort(-W)[:n_genes])

        if preprocessing == 'Normalizer':
            Ds = D[:, gkeep].toarray()
            Ds = Normalizer().fit_transform(Ds)

        elif preprocessing == 'StandardScaler':
            Ds = D[:, gkeep].toarray()
            Ds = StandardScaler(with_mean=True).fit_transform(Ds)
            # Clip extreme z-scores.
            Ds[Ds > 5] = 5
            Ds[Ds < -5] = -5

        else:
            Ds = D[:, gkeep].toarray()

        # Rescale each gene by its SAM weight before PCA.
        D_sub = Ds * (W[gkeep])

        if numcells > 500:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='auto')
        else:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='full')

        if self.distance == 'euclidean':
            g_weighted = Normalizer().fit_transform(g_weighted)

        self.adata.uns['pca_obj'] = pca
        EDM = self.calc_nnm(g_weighted)

        W = self.dispersion_ranking_NN(
            EDM, num_norm_avg=num_norm_avg)

        self.adata.uns['X_processed'] = D_sub

        return W, g_weighted, EDM
    def calc_nnm(self,g_weighted):
        """Build a sparse binary kNN adjacency matrix from the PCA
        coordinates, using self.k neighbors and self.distance.
        """
        numcells=g_weighted.shape[0]
        if g_weighted.shape[0] > 8000:
            # Large datasets: approximate nearest-neighbor search.
            nnm, dists = ut.nearest_neighbors(
                g_weighted, n_neighbors=self.k, metric=self.distance)
            EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
            EDM[np.tile(np.arange(nnm.shape[0])[:, None],
                        (1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
            EDM = EDM.tocsr()
        else:
            # Small datasets: exact full pairwise distance matrix.
            dist = ut.compute_distances(g_weighted, self.distance)
            nnm = ut.dist_to_nn(dist, self.k)
            EDM = sp.csr_matrix(nnm)
        return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
    """Plot orthogonal gene expression patterns.

    With no arguments, shows the top-ranked gene of every distinct
    correlated gene group. When `group` is given, shows the top
    `n_genes` genes inside that single group.

    Parameters
    ----------
    group - int, optional, default None
        Index of the correlated group to display; None displays one
        representative gene per group.
    n_genes - int, optional, default 5
        Number of top-ranked genes shown when `group` is specified.
    **kwargs -
        Forwarded to 'show_gene_expression' / 'scatter'.
    """
    groups = self.adata.uns['gene_groups']
    if group is None:
        # one representative (top-ranked) gene per correlated group
        for members in groups:
            self.show_gene_expression(members[0], **kwargs)
    else:
        # top n_genes within the requested group
        for rank in range(n_genes):
            self.show_gene_expression(groups[group][rank], **kwargs)
def plot_correlated_genes(
        self,
        name,
        n_genes=5,
        number_of_features=1000,
        **kwargs):
    """Plot expression patterns of genes correlated with `name`.

    Parameters
    ----------
    name - string
        Gene whose correlated expression patterns are displayed
        (case sensitive).
    n_genes - int, optional, default 5
        Number of top correlated genes to display.
    **kwargs -
        Forwarded to 'show_gene_expression' / 'scatter'.

    Returns
    -------
    The correlated gene names (query gene excluded), or None if the
    gene is absent from the filtered data.
    """
    gene_ids = np.array(list(self.adata.var_names))
    if (gene_ids == name).sum() == 0:
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return
    sds = self.corr_bin_genes(
        input_gene=name,
        number_of_features=number_of_features)
    # skip index 0: that is the query gene itself
    stop = min(n_genes + 1, sds.size)
    for rank in range(1, stop):
        self.show_gene_expression(sds[rank], **kwargs)
    return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
    """A (hacky) method for binning groups of genes correlated along the
    SAM manifold.

    Greedily grows "seed" bins: each of the top dispersed genes joins the
    existing bin it is most correlated with (in kNN-averaged expression),
    or starts a new bin if no correlation exceeds the gene's mean
    positive correlation.

    Parameters
    ----------
    number_of_features - int, optional, default None
        The number of genes to bin. Capped at 1000 due to memory
        considerations.
    input_gene - str, optional, default None
        If not None, use this gene as the first seed when growing the
        correlation bins, and return only the bin containing it.
    """
    weights = self.adata.var['spatial_dispersions'].values
    all_gene_names = np.array(list(self.adata.var_names))
    D_avg = self.adata.layers['X_knn_avg']
    # candidate genes: positive dispersion, sorted by decreasing weight
    idx2 = np.argsort(-weights)[:weights[weights > 0].size]
    if(number_of_features is None or number_of_features > idx2.size):
        number_of_features = idx2.size
    if number_of_features > 1000:
        number_of_features = 1000
    if(input_gene is not None):
        input_gene = np.where(all_gene_names == input_gene)[0]
        if(input_gene.size == 0):
            print(
                "Gene note found in the filtered dataset. Note "
                "that genes are case sensitive.")
            return
        # NOTE(review): the first seed stores the raw gene index (an
        # array), not a position in idx2 — confirm intended when the
        # query gene is outside the top-dispersed set.
        seeds = [np.array([input_gene])]
        # pairwise gene-gene correlations over kNN-averaged expression
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            # threshold: mean of this gene's positive correlations
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                # compare against the first (seed) member of each bin
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                   > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                # no bin correlated enough: start a new one
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(all_gene_names[seeds[i]])
        # only the bin grown from the query gene is returned
        return geneID_groups[0]
    else:
        # same greedy binning, seeded by the top dispersed gene
        seeds = [np.array([idx2[0]])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                   > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(
                all_gene_names[seeds[i]])
        self.adata.uns['gene_groups'] = geneID_groups
        return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
    """Wrapper for sklearn's t-SNE implementation.

    If `X` is provided, embeds it with the given `metric` and returns
    the coordinates without modifying `adata`. Otherwise embeds the
    SAM PCA coordinates using SAM's own distance metric and stores the
    result in ``adata.obsm['X_tsne']``. All other keyword arguments
    are forwarded to sklearn's TSNE.
    """
    if X is not None:
        # explicit input: embed and return, leaving adata untouched
        return man.TSNE(metric=metric, **kwargs).fit_transform(X)
    # default: embed the SAM PCA coordinates with SAM's metric
    embedding = man.TSNE(metric=self.distance, **kwargs).fit_transform(
        self.adata.obsm['X_pca'])
    self.adata.obsm['X_tsne'] = embedding
def run_umap(self, X=None, metric=None, **kwargs):
    """Wrapper for umap-learn.

    See https://github.com/lmcinnes/umap for the documentation and
    source code. If `X` is given, returns its embedding; otherwise
    embeds the SAM PCA coordinates and stores the result in
    ``adata.obsm['X_umap']``. When `metric` is None, SAM's distance
    metric is used.
    """
    import umap as umap
    if metric is None:
        metric = self.distance
    reducer = umap.UMAP(metric=metric, **kwargs)
    if X is not None:
        # explicit input: embed and return, leaving adata untouched
        return reducer.fit_transform(X)
    self.adata.obsm['X_umap'] = reducer.fit_transform(
        self.adata.obsm['X_pca'])
def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
                  method='gauss', **kwargs):
    """
    Experimental -- running UMAP on the diffusion components
    """
    # NOTE(review): the `metric` and `**kwargs` parameters are accepted
    # but never used below — confirm whether they should be forwarded.
    import scanpy.api as sc
    # neighbor graph on the chosen representation using SAM's metric
    sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
                    metric=self.distance,method=method)
    sc.tl.diffmap(self.adata, n_comps=n_comps)
    # rebuild the neighbor graph in diffusion-component space
    sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
                    metric='euclidean',method=method)
    # preserve any existing UMAP before scanpy overwrites 'X_umap'
    if 'X_umap' in self.adata.obsm.keys():
        self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
    sc.tl.umap(self.adata,min_dist=0.1,copy=False)
def knn_avg(self, nnm=None):
    """Average dispersion-layer expression over each cell's k neighbors.

    Uses the supplied adjacency `nnm` (or the stored neighbor
    connectivities when None), divides by `self.k`, and stores the
    smoothed matrix in ``adata.layers['X_knn_avg']``.
    """
    if nnm is None:
        nnm = self.adata.uns['neighbors']['connectivities']
    averaged = (nnm / self.k).dot(self.adata.layers['X_disp'])
    self.adata.layers['X_knn_avg'] = averaged
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
            edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
    """Display a scatter plot.

    Displays a scatter plot using the SAM projection or another input
    projection with or without annotations.

    Parameters
    ----------
    projection - ndarray of floats, optional, default None
        An N x 2 matrix, where N is the number of data points. If None,
        use an existing SAM projection (default t-SNE). Can take on values
        'umap' or 'tsne' to specify either the SAM UMAP embedding or
        SAM t-SNE embedding.

    c - ndarray or str, optional, default None
        Colors for each cell in the scatter plot. Can be a vector of
        floats or strings for cell annotations. Can also be a key
        for sam.adata.obs (i.e. 'louvain_clusters').

    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.

    cmap - string, optional, default 'rainbow'
        The colormap to use for the input color values.

    colorbar - bool, optional default True
        If True, display a colorbar indicating which values / annotations
        correspond to which color in the scatter plot.

    Keyword arguments -
        All other keyword arguments that can be passed into
        matplotlib.pyplot.scatter can be used.
    """
    if (not PLOTTING):
        print("matplotlib not installed!")
    else:
        if(isinstance(projection, str)):
            try:
                dt = self.adata.obsm[projection]
            except KeyError:
                # NOTE(review): execution continues after this print with
                # `dt` unbound, which would raise NameError below —
                # confirm whether a `return` is missing here.
                print('Please create a projection first using run_umap or'
                      'run_tsne')
        elif(projection is None):
            # fall back to UMAP, then t-SNE, else bail out
            try:
                dt = self.adata.obsm['X_umap']
            except KeyError:
                try:
                    dt = self.adata.obsm['X_tsne']
                except KeyError:
                    print("Please create either a t-SNE or UMAP projection"
                          "first.")
                    return
        else:
            dt = projection

        if(axes is None):
            plt.figure()
            axes = plt.gca()

        if(c is None):
            # no coloring requested: plain scatter
            plt.scatter(dt[:, 0], dt[:, 1], s=s,
                        linewidth=linewidth, edgecolor=edgecolor, **kwargs)
        else:
            # a string `c` is treated as an adata.obs key if present
            if isinstance(c, str):
                try:
                    c = self.adata.obs[c].get_values()
                except KeyError:
                    0  # do nothing
            if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
               (isinstance(c, np.ndarray) or isinstance(c, list))):
                # categorical string annotations: convert to integer codes
                i = ut.convert_annotations(c)
                ui, ai = np.unique(i, return_index=True)
                cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    # label colorbar ticks with the original annotations
                    cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                    cbar.ax.set_yticklabels(c[ai])
            else:
                # scalar or numeric-vector coloring
                if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                    colorbar = False
                i = c
                cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
    """Display a gene's expressions.

    Displays a scatter plot using the SAM projection or another input
    projection with a particular gene's expressions overlaid.

    Parameters
    ----------
    gene - string
        a case-sensitive string indicating the gene expression pattern
        to display.

    avg - bool, optional, default True
        If True, the plots use the k-nearest-neighbor-averaged expression
        values to smooth out noisy expression patterns and improves
        visualization.

    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.

    **kwargs - all keyword arguments in 'SAM.scatter' are eligible.
    """
    all_gene_names = np.array(list(self.adata.var_names))
    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))

    idx = np.where(all_gene_names == gene)[0]
    name = gene
    if(idx.size == 0):
        # BUG FIX: corrected typo "Gene note found" in user-facing message
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return

    if(avg):
        a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
        if a.sum() == 0:
            # smoothed values are all zero: fall back to raw expression
            a = np.log2(self.adata_raw.X[np.in1d(
                all_cell_names, cell_names), :][:,
                idx].toarray().flatten() + 1)
    else:
        a = np.log2(self.adata_raw.X[np.in1d(
            all_cell_names, cell_names), :][:,
            idx].toarray().flatten() + 1)

    if axes is None:
        plt.figure()
        axes = plt.gca()

    self.scatter(c=a, axes=axes, **kwargs)
    axes.set_title(name)
def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
    """Cluster cells with DBSCAN and absorb noise points into clusters.

    Noise points (label -1) are reassigned to the cluster holding the
    majority of their k nearest clustered neighbors. When `X` is None
    the UMAP embedding is used and results are saved to
    ``adata.obs['density_clusters']``; otherwise labels are returned.
    """
    from sklearn.cluster import DBSCAN
    if X is None:
        X = self.adata.obsm['X_umap']
        save = True
    else:
        save = False
    cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        # distances from clustered cells (rows) to noise cells (cols)
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # vote counts: neighbors of each noise cell per cluster
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
        cl[idx1] = np.argmax(nnmc, axis=1)
    if save:
        self.adata.obs['density_clusters'] = pd.Categorical(cl)
    else:
        return cl
def louvain_clustering(self, X=None, res=1, method='modularity'):
    """Runs Louvain clustering using the vtraag implementation. Assumes
    that 'louvain' optional dependency is installed.

    Parameters
    ----------
    res - float, optional, default 1
        The resolution parameter which tunes the number of clusters Louvain
        finds.

    method - str, optional, default 'modularity'
        Can be 'modularity' or 'significance', which are two different
        optimizing funcitons in the Louvain algorithm.
    """
    if X is None:
        X = self.adata.uns['neighbors']['connectivities']
        save = True
    else:
        if not sp.isspmatrix_csr(X):
            X = sp.csr_matrix(X)
        save = False
    import igraph as ig
    import louvain
    # shared-nearest-neighbor adjacency, re-sparsified to k per row
    adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
    sources, targets = adjacency.nonzero()
    weights = adjacency[sources, targets]
    if isinstance(weights, np.matrix):
        # flatten the 1 x nnz matrix returned by sparse fancy indexing
        weights = weights.A1
    g = ig.Graph(directed=True)
    g.add_vertices(adjacency.shape[0])
    g.add_edges(list(zip(sources, targets)))
    try:
        g.es['weight'] = weights
    except BaseException:
        # best-effort: fall back to an unweighted graph
        pass
    if method == 'significance':
        cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
    else:
        cl = louvain.find_partition(
            g,
            louvain.RBConfigurationVertexPartition,
            resolution_parameter=res)
    if save:
        self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
    else:
        return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
    """Performs k-means clustering.

    Parameters
    ----------
    numc - int
        Number of clusters

    npcs - int, optional, default 15
        Number of principal components to use as inpute for k-means
        clustering.
    """
    from sklearn.cluster import KMeans
    if X is None:
        # project the processed data onto the stored top PCs
        D_sub = self.adata.uns['X_processed']
        X = (
            D_sub -
            D_sub.mean(0)).dot(
            self.adata.uns['pca_obj'].components_[
                :npcs,
                :].T)
        save = True
    else:
        save = False
    # L2-normalize rows before clustering (cosine-like geometry)
    cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
    if save:
        self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
    else:
        return cl
def leiden_clustering(self, X=None, res = 1):
    """Run scanpy's Leiden clustering on the stored neighbor graph.

    When `X` is None, labels are stored in
    ``adata.obs['leiden_clusters']``; otherwise `X` is used as the
    adjacency and labels go to ``adata.obs['leiden_clusters_X']``.
    """
    import scanpy.api as sc
    if X is None:
        sc.tl.leiden(self.adata, resolution = res,
                     key_added='leiden_clusters')
        # convert scanpy's string categories to integer categories
        self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
            'leiden_clusters'].get_values().astype('int'))
    else:
        sc.tl.leiden(self.adata, resolution = res, adjacency = X,
                     key_added='leiden_clusters_X')
        self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
            'leiden_clusters_X'].get_values().astype('int'))
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    """Cluster cells with HDBSCAN, then absorb noise points.

    Noise cells (label -1) are reassigned to the cluster holding the
    majority of their k nearest clustered neighbors. When `X` is None,
    the processed data projected onto the top 15 PCs is used and labels
    are saved to ``adata.obs['hdbknn_clusters']``; otherwise labels are
    returned.
    """
    import hdbscan
    if X is None:
        #X = self.adata.obsm['X_pca']
        D = self.adata.uns['X_processed']
        X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False
    if k is None:
        k = 20#self.k
    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
    cl = hdb.fit_predict(X)
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        # distances from clustered cells (rows) to noise cells (cols)
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # vote counts: neighbors of each noise cell per cluster
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
        cl[idx1] = np.argmax(nnmc, axis=1)
    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
                             n_genes=4000):
    """
    Ranks marker genes for each cluster using a random forest
    classification approach.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    clusters - int or array-like, default None
        A number or vector corresponding to the specific cluster ID(s)
        for which marker genes will be calculated. If None, marker genes
        will be computed for all clusters.

    n_genes - int, optional, default 4000
        By default, trains the classifier on the top 4000 SAM-weighted
        genes.

    Returns
    -------
    markers - dict mapping each cluster ID to its ranked gene names.
    """
    if labels is None:
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    from sklearn.ensemble import RandomForestClassifier

    markers = {}
    # BUG FIX: was `clusters == None`, which performs an elementwise
    # NumPy comparison for array-like `clusters` and raises ValueError
    # on truth-testing; `is None` matches the check used below.
    if clusters is None:
        lblsu = np.unique(lbls)
    else:
        lblsu = np.unique(clusters)

    # train on the top n_genes SAM-weighted genes only
    indices = np.argsort(-self.adata.var['weights'].values)
    X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
    for K in range(lblsu.size):
        print(K)  # progress indicator (one classifier per cluster)
        # one-vs-rest target for the current cluster
        y = np.zeros(lbls.size)
        y[lbls == lblsu[K]] = 1
        clf = RandomForestClassifier(n_estimators=100, max_depth=None,
                                     random_state=0)
        clf.fit(X, y)
        # rank genes by their importance to the classifier
        idx = np.argsort(-clf.feature_importances_)
        markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
    if clusters is None:
        self.adata.uns['marker_genes_rf'] = markers
    return markers
def identify_marker_genes_ratio(self, labels=None):
    """
    Ranks marker genes for each cluster using a SAM-weighted
    expression-ratio approach (works quite well).

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    Returns
    -------
    markers - dict mapping each cluster label to its ranked gene names.
    """
    if(labels is None):
        try:
            # default to the first '*_clusters' column found in obs
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels
    all_gene_names = np.array(list(self.adata.var_names))
    markers={}
    # total expression per gene over all cells
    s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
    lblsu=np.unique(lbls)
    for i in lblsu:
        # total expression per gene within this cluster
        d = np.array(self.adata.layers['X_disp']
                     [lbls == i, :].sum(0)).flatten()
        rat = np.zeros(d.size)
        # score = (in-cluster sum)^2 / total sum, scaled by SAM weight
        rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
            self.adata.var['weights'].values[s > 0]
        x = np.argsort(-rat)
        markers[i] = all_gene_names[x[:]]
    self.adata.uns['marker_genes_ratio'] = markers
    return markers
def identify_marker_genes_corr(self, labels=None, n_genes=4000):
    """
    Ranking marker genes based on their respective magnitudes in the
    correlation dot products with cluster-specific reference expression
    profiles.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    n_genes - int, optional, default 4000
        By default, computes correlations on the top 4000 SAM-weighted genes.

    Returns
    -------
    markers - dict mapping each cluster label to its ranked gene names.
    """
    if(labels is None):
        try:
            # default to the first '*_clusters' column found in obs
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    w=self.adata.var['weights'].values
    s = StandardScaler()
    # restrict to the top n_genes SAM-weighted genes
    idxg = np.argsort(-w)[:n_genes]
    # standardize per gene, then rescale by SAM weight
    y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
    all_gene_names = np.array(list(self.adata.var_names))[idxg]
    markers = {}
    lblsu=np.unique(lbls)
    for i in lblsu:
        Gcells = np.array(list(self.adata.obs_names[lbls==i]))
        z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
        # z-score each cell's profile across genes
        m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
        # cluster reference profile, also z-scored
        ref = z1.mean(0)
        ref = (ref-ref.mean())/ref.std()
        # mean per-gene contribution to the cell-vs-reference correlation
        g2 = (m1*ref).mean(0)
        markers[i] = all_gene_names[np.argsort(-g2)]
    self.adata.uns['marker_genes_corr'] = markers
    return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.save_anndata | python | def save_anndata(self, fname, data = 'adata_raw', **kwargs):
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs) | Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L448-L458 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
    """Initialize SAM from preloaded data (or empty, for later loading).

    `counts` may be a (matrix, gene IDs, cell IDs) tuple/list, a
    cells x genes DataFrame, or an AnnData object; `annotations` is an
    optional per-cell annotation vector.
    """
    if isinstance(counts, tuple) or isinstance(counts, list):
        raw_data, all_gene_names, all_cell_names = counts
        if isinstance(raw_data, np.ndarray):
            # keep the raw matrix sparse internally
            raw_data = sp.csr_matrix(raw_data)
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, pd.DataFrame):
        raw_data = sp.csr_matrix(counts.values)
        all_gene_names = np.array(list(counts.columns.values))
        all_cell_names = np.array(list(counts.index.values))
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, AnnData):
        all_cell_names=np.array(list(counts.obs_names))
        all_gene_names=np.array(list(counts.var_names))
        self.adata_raw = counts
    elif counts is not None:
        raise Exception(
            "\'counts\' must be either a tuple/list of "
            "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
            "cells x genes")

    if(annotations is not None):
        annotations = np.array(list(annotations))
        if counts is not None:
            self.adata_raw.obs['annotations'] = pd.Categorical(annotations)

    if(counts is not None):
        # de-duplicate IDs so AnnData indexing stays unambiguous
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        # 'X_disp' holds the data used for display/dispersion math
        self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                    include_genes=None, exclude_genes=None,
                    include_cells=None, exclude_cells=None,
                    norm='log', min_expression=1, thresh=0.01,
                    filter_genes=True):
    """Log-normalizes and filters the expression data.

    Parameters
    ----------

    div : float, optional, default 1
        The factor by which the gene expression will be divided prior to
        log normalization.

    downsample : float, optional, default 0
        The factor by which to randomly downsample the data. If 0, the
        data will not be downsampled.

    sum_norm : str or float, optional, default None
        If a float, the total number of transcripts in each cell will be
        normalized to this value prior to normalization and filtering.
        Otherwise, nothing happens. If 'cell_median', each cell is
        normalized to have the median total read count per cell. If
        'gene_median', each gene is normalized to have the median total
        read count per gene.

    norm : str, optional, default 'log'
        If 'log', log-normalizes the expression data. If 'ftt', applies the
        Freeman-Tukey variance-stabilization transformation. If
        'multinomial', applies the Pearson-residual transformation (this is
        experimental and should only be used for raw, un-normalized UMI
        datasets). If None, the data is not normalized.

    include_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to keep.
        All other genes will be filtered out. Gene names are case-
        sensitive.

    exclude_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to
        exclude. These genes will be filtered out. Gene names are case-
        sensitive.

    include_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to keep.
        All other cells will be filtered out. Cell names are
        case-sensitive.

    exclude_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to exclude.
        Thses cells will be filtered out. Cell names are
        case-sensitive.

    min_expression : float, optional, default 1
        The threshold above which a gene is considered
        expressed. Gene expression values less than 'min_expression' are
        set to zero.

    thresh : float, optional, default 0.2
        Keep genes expressed in greater than 'thresh'*100 % of cells and
        less than (1-'thresh')*100 % of cells, where a gene is considered
        expressed if its expression value exceeds 'min_expression'.

    filter_genes : bool, optional, default True
        Setting this to False turns off filtering operations aside from
        removing genes with zero expression across all cells. Genes passed
        in exclude_genes or not passed in include_genes will still be
        filtered.
    """
    # load data
    try:
        D= self.adata_raw.X
        self.adata = self.adata_raw.copy()
    except AttributeError:
        print('No data loaded')
    # filter cells
    cell_names = np.array(list(self.adata_raw.obs_names))
    idx_cells = np.arange(D.shape[0])
    if(include_cells is not None):
        include_cells = np.array(list(include_cells))
        idx2 = np.where(np.in1d(cell_names, include_cells))[0]
        idx_cells = np.array(list(set(idx2) & set(idx_cells)))
    if(exclude_cells is not None):
        exclude_cells = np.array(list(exclude_cells))
        idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                invert=True))[0]
        idx_cells = np.array(list(set(idx4) & set(idx_cells)))
    if downsample > 0:
        # random subsample without replacement, intersected with filters
        numcells = int(D.shape[0] / downsample)
        rand_ind = np.random.choice(np.arange(D.shape[0]),
                                    size=numcells, replace=False)
        idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
    else:
        numcells = D.shape[0]

    mask_cells = np.zeros(D.shape[0], dtype='bool')
    mask_cells[idx_cells] = True
    self.adata = self.adata_raw[mask_cells,:].copy()
    D = self.adata.X
    if isinstance(D,np.ndarray):
        D=sp.csr_matrix(D,dtype='float32')
    else:
        D=D.astype('float32')
        D.sort_indices()
    if(D.getformat() == 'csc'):
        D=D.tocsr();
    # sum-normalize
    if (sum_norm == 'cell_median' and norm != 'multinomial'):
        # scale each cell to the median per-cell total count
        s = D.sum(1).A.flatten()
        sum_norm = np.median(s)
        D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
    elif (sum_norm == 'gene_median' and norm != 'multinomial'):
        # scale each gene to the median per-gene total count
        s = D.sum(0).A.flatten()
        sum_norm = np.median(s)
        s[s==0]=1
        D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
    elif sum_norm is not None and norm != 'multinomial':
        D = D.multiply(1 / D.sum(1).A.flatten()[:,
                       None] * sum_norm).tocsr()
    # normalize
    self.adata.X = D
    if norm is None:
        D.data[:] = (D.data / div)
    elif(norm.lower() == 'log'):
        D.data[:] = np.log2(D.data / div + 1)
    elif(norm.lower() == 'ftt'):
        # Freeman-Tukey variance-stabilizing transform
        D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
    elif norm.lower() == 'multinomial':
        # Pearson residuals under a multinomial model (experimental)
        ni = D.sum(1).A.flatten() #cells
        pj = (D.sum(0) / D.sum()).A.flatten() #genes
        col = D.indices
        row=[]
        for i in range(D.shape[0]):
            # expand CSR indptr into explicit row indices
            row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
        row = np.concatenate(row).astype('int32')
        # expected counts mu_ij = n_i * p_j at the nonzero positions
        mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
        mu2 = mu.copy()
        mu2.data[:]=mu2.data**2
        mu2 = mu2.multiply(1/ni[:,None])
        # residual = (observed - expected) / sqrt(variance)
        mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
        self.adata.X = mu
        if sum_norm is None:
            sum_norm = np.median(ni)
        # D (the display layer) is still log-normalized separately
        D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
        D.data[:] = np.log2(D.data / div + 1)
    else:
        D.data[:] = (D.data / div)

    # zero-out low-expressed genes
    idx = np.where(D.data <= min_expression)[0]
    D.data[idx] = 0

    # filter genes
    gene_names = np.array(list(self.adata.var_names))
    idx_genes = np.arange(D.shape[1])
    if(include_genes is not None):
        include_genes = np.array(list(include_genes))
        idx = np.where(np.in1d(gene_names, include_genes))[0]
        idx_genes = np.array(list(set(idx) & set(idx_genes)))
    if(exclude_genes is not None):
        exclude_genes = np.array(list(exclude_genes))
        idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                invert=True))[0]
        idx_genes = np.array(list(set(idx3) & set(idx_genes)))
    if(filter_genes):
        # count cells expressing each gene, keep mid-frequency genes
        a, ct = np.unique(D.indices, return_counts=True)
        c = np.zeros(D.shape[1])
        c[a] = ct
        keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                       c / D.shape[0] <= 1 - thresh))[0]
        idx_genes = np.array(list(set(keep) & set(idx_genes)))

    mask_genes = np.zeros(D.shape[1], dtype='bool')
    mask_genes[idx_genes] = True
    # zero out filtered genes rather than dropping columns
    self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
    self.adata.X.eliminate_zeros()
    self.adata.var['mask_genes']=mask_genes

    if norm == 'multinomial':
        self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
        self.adata.layers['X_disp'].eliminate_zeros()
    else:
        self.adata.layers['X_disp'] = self.adata.X
def load_data(self, filename, transpose=True,
              save_sparse_file='h5ad', sep=',', **kwargs):
    """Loads the specified data file. The file can be a table of
    read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
    as columns by default. The file can also be a pickle file (output from
    'save_sparse_data') or an h5ad file (output from 'save_anndata').

    This function that loads the file specified by 'filename'.

    Parameters
    ----------
    filename - string
        The path to the tabular raw expression counts file.

    sep - string, optional, default ','
        The delimeter used to read the input data table. By default
        assumes the input table is delimited by commas.

    save_sparse_file - str, optional, default 'h5ad'
        If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
        (the native AnnData file format) to the same folder as the original
        data for faster loading in the future. If 'p', pickles the sparse
        data structure, cell names, and gene names in the same folder as
        the original data for faster loading in the future.

    transpose - bool, optional, default True
        By default, assumes file is (genes x cells). Set this to False if
        the file has dimensions (cells x genes).
    """
    if filename.split('.')[-1] == 'p':
        # pickled (matrix, cell IDs, gene IDs) from save_sparse_data
        raw_data, all_cell_names, all_gene_names = (
            pickle.load(open(filename, 'rb')))
        if(transpose):
            raw_data = raw_data.T
            if raw_data.getformat()=='csc':
                print("Converting sparse matrix to csr format...")
                raw_data=raw_data.tocsr()
        # already a cached file: no need to re-save
        save_sparse_file = None
    elif filename.split('.')[-1] != 'h5ad':
        # plain-text table of counts
        df = pd.read_csv(filename, sep=sep, index_col=0)
        if(transpose):
            dataset = df.T
        else:
            dataset = df
        raw_data = sp.csr_matrix(dataset.values)
        all_cell_names = np.array(list(dataset.index.values))
        all_gene_names = np.array(list(dataset.columns.values))

    if filename.split('.')[-1] != 'h5ad':
        self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                 var={'var_names': all_gene_names})
        # de-duplicate IDs so AnnData indexing stays unambiguous
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = raw_data
    else:
        self.adata_raw = anndata.read_h5ad(filename, **kwargs)
        self.adata = self.adata_raw.copy()
        if 'X_disp' not in list(self.adata.layers.keys()):
            self.adata.layers['X_disp'] = self.adata.X
        # already a cached file: no need to re-save
        save_sparse_file = None
    if(save_sparse_file == 'p'):
        # cache next to the original file for faster future loads
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_sparse_data(path + new_sparse_file + '_sparse.p')
    elif(save_sparse_file == 'h5ad'):
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
    """Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
    Pickle file.

    The matrix is stored genes x cells in CSC format.

    Parameters
    ----------
    fname - string
        The filename of the output file.
    """
    data = self.adata_raw.X.T
    if data.getformat() == 'csr':
        data = data.tocsc()
    cell_names = np.array(list(self.adata_raw.obs_names))
    gene_names = np.array(list(self.adata_raw.var_names))
    # BUG FIX: the original opened the file without closing it;
    # the context manager guarantees the handle is released.
    with open(fname, 'wb') as f:
        pickle.dump((data, cell_names, gene_names), f)
def load_annotations(self, aname, sep=','):
    """Loads cell annotations.

    Loads the cell annoations specified by the 'aname' path.

    Parameters
    ----------
    aname - string
        The path to the annotations file. First column should be cell IDs
        and second column should be the desired annotations.
    """
    ann = pd.read_csv(aname)

    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))

    # Re-read with the layout that matches the file: with/without an
    # index column and with/without a header row, judged by whether the
    # row count matches the number of cells.
    if(ann.shape[1] > 1):
        ann = pd.read_csv(aname, index_col=0, sep=sep)
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
    else:
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, header=None, sep=sep)

    # coerce the index to fixed-width strings for reliable lookup
    ann.index = np.array(list(ann.index.astype('<U100')))
    # ann1: annotations reordered to the filtered cells; ann2: all cells
    ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
    ann2 = np.array(list(ann.values.flatten()))
    self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
    self.adata.obs['annotations'] = pd.Categorical(ann1)
    def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
        """Computes the spatial dispersion factors for each gene.
        Parameters
        ----------
        nnm - scipy.sparse, float
            Square cell-to-cell nearest-neighbor matrix.
        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This ensures
            that outlier genes do not significantly skew the weight
            distribution.
        Returns:
        -------
        weights - ndarray, float
            The vector of gene weights.
        """
        # Smooth expression over each cell's nearest neighbors first.
        self.knn_avg(nnm)
        D_avg = self.adata.layers['X_knn_avg']
        mu, var = sf.mean_variance_axis(D_avg, axis=0)
        # Per-gene dispersion (variance / mean); genes with zero mean stay 0.
        dispersions = np.zeros(var.size)
        dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
        self.adata.var['spatial_dispersions'] = dispersions.copy()
        # Clip at the mean of the top 'num_norm_avg' dispersions so a few
        # outlier genes cannot dominate the weight normalization.
        ma = np.sort(dispersions)[-num_norm_avg:].mean()
        dispersions[dispersions >= ma] = ma
        weights = ((dispersions / dispersions.max())**0.5).flatten()
        self.adata.var['weights'] = weights
        return weights
    def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
        """Computes the contribution of the gene IDs in 'genes' to each
        principal component (PC) of the filtered expression data as the mean of
        the absolute value of the corresponding gene loadings. High values
        correspond to PCs that are highly correlated with the features in
        'genes'. These PCs can then be regressed out of the data using
        'regress_genes'.
        Parameters
        ----------
        genes - numpy.array or list
            Genes for which contribution to each PC will be calculated.
        npcs - int, optional, default None
            How many PCs to calculate when computing PCA of the filtered and
            log-transformed expression data. If None, calculate all PCs.
        plot - bool, optional, default False
            If True, plot the scores reflecting how correlated each PC is with
            genes of interest. Otherwise, do not plot anything.
        Returns:
        -------
        x - numpy.array
            Scores reflecting how correlated each PC is with the genes of
            interest (ordered by decreasing eigenvalues). Returns None if
            'genes' is None (only the PCA is stored in that case).
        """
        from sklearn.decomposition import PCA
        if npcs is None:
            # Cap the number of components at the number of cells.
            npcs = self.adata.X.shape[0]
        pca = PCA(n_components=npcs)
        pc = pca.fit_transform(self.adata.X.toarray())
        # Cache the fitted PCA and scores for 'regress_genes'.
        self.regression_pca = pca
        self.regression_pcs = pc
        gene_names = np.array(list(self.adata.var_names))
        if(genes is not None):
            idx = np.where(np.in1d(gene_names, genes))[0]
            # Mean absolute loading of the selected genes on each PC.
            sx = pca.components_[:, idx]
            x = np.abs(sx).mean(1)
            if plot:
                plt.figure()
                plt.plot(x)
            return x
        else:
            return
    def regress_genes(self, PCs):
        """Regress out the principal components in 'PCs' from the filtered
        expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
        been previously called.
        Parameters
        ----------
        PCs - int, numpy.array, list
            The principal components to regress out of the expression data.
        """
        # Normalize 'PCs' to a flat index array regardless of input form.
        ind = [PCs]
        ind = np.array(ind).flatten()
        # NOTE(review): the broad except falls back to unweighted loadings
        # when the weighted subtraction fails (e.g. missing 'weights' or a
        # shape mismatch); consider narrowing the exception type.
        try:
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :] * self.adata.var[
                    'weights'].values)
        except BaseException:
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :])
        self.adata.X = sp.csr_matrix(y)
def run(self,
max_iter=10,
verbose=True,
projection='umap',
stopping_condition=5e-3,
num_norm_avg=50,
k=20,
distance='correlation',
preprocessing='Normalizer',
proj_kwargs={}):
"""Runs the Self-Assembling Manifold algorithm.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when constructing cell distance
matrices. Can be any of the distance metrics supported by
sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
stopping_condition - float, optional, default 5e-3
The stopping condition threshold for the RMSE between gene weights
in adjacent iterations.
verbose - bool, optional, default True
If True, the iteration number and error between gene weights in
adjacent iterations will be displayed.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
preprocessing - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets and
'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
proj_kwargs - dict, optional, default {}
A dictionary of keyword arguments to pass to the projection
functions.
"""
self.distance = distance
D = self.adata.X
self.k = k
if(self.k < 5):
self.k = 5
elif(self.k > 100):
self.k = 100
if(self.k > D.shape[0] - 1):
self.k = D.shape[0] - 2
numcells = D.shape[0]
n_genes = 8000
if numcells > 3000 and n_genes > 3000:
n_genes = 3000
elif numcells > 2000 and n_genes > 4500:
n_genes = 4500
elif numcells > 1000 and n_genes > 6000:
n_genes = 6000
elif n_genes > 8000:
n_genes = 8000
npcs = None
if npcs is None and numcells > 3000:
npcs = 150
elif npcs is None and numcells > 2000:
npcs = 250
elif npcs is None and numcells > 1000:
npcs = 350
elif npcs is None:
npcs = 500
tinit = time.time()
edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
nums = np.arange(edm.shape[1])
RINDS = np.random.randint(
0, numcells, (self.k - 1) * numcells).reshape((numcells,
(self.k - 1)))
RINDS = np.hstack((nums[:, None], RINDS))
edm[np.tile(np.arange(RINDS.shape[0])[:, None],
(1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
edm = edm.tocsr()
print('RUNNING SAM')
W = self.dispersion_ranking_NN(
edm, num_norm_avg=1)
old = np.zeros(W.size)
new = W
i = 0
err = ((new - old)**2).mean()**0.5
if max_iter < 5:
max_iter = 5
nnas = num_norm_avg
self.Ns=[edm]
self.Ws = [W]
while (i < max_iter and err > stopping_condition):
conv = err
if(verbose):
print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))
i += 1
old = new
W, wPCA_data, EDM, = self.calculate_nnm(
D, W, n_genes, preprocessing, npcs, numcells, nnas)
new = W
err = ((new - old)**2).mean()**0.5
self.Ns.append(EDM)
self.Ws.append(W)
W, wPCA_data, EDM, = self.calculate_nnm(
D, W, n_genes, preprocessing, npcs, numcells, nnas)
self.Ns.append(EDM)
all_gene_names = np.array(list(self.adata.var_names))
indices = np.argsort(-W)
ranked_genes = all_gene_names[indices]
self.corr_bin_genes(number_of_features=1000)
self.adata.uns['ranked_genes'] = ranked_genes
self.adata.obsm['X_pca'] = wPCA_data
self.adata.uns['neighbors'] = {}
self.adata.uns['neighbors']['connectivities'] = EDM
if(projection == 'tsne'):
print('Computing the t-SNE embedding...')
self.run_tsne(**proj_kwargs)
elif(projection == 'umap'):
print('Computing the UMAP embedding...')
self.run_umap(**proj_kwargs)
elif(projection == 'diff_umap'):
print('Computing the diffusion UMAP embedding...')
self.run_diff_umap(**proj_kwargs)
elapsed = time.time() - tinit
if verbose:
print('Elapsed time: ' + str(elapsed) + ' seconds')
    def calculate_nnm(
            self,
            D,
            W,
            n_genes,
            preprocessing,
            npcs,
            numcells,
            num_norm_avg):
        """One SAM iteration: select the top-weighted genes, normalize,
        run weighted PCA, build the nearest-neighbor graph, and recompute
        gene weights.
        Returns (weights, weighted PCA coordinates, nearest-neighbor matrix).
        """
        if(n_genes is None):
            gkeep = np.arange(W.size)
        else:
            # Keep the top 'n_genes' genes by current weight, in sorted
            # column order.
            gkeep = np.sort(np.argsort(-W)[:n_genes])
        if preprocessing == 'Normalizer':
            Ds = D[:, gkeep].toarray()
            Ds = Normalizer().fit_transform(Ds)
        elif preprocessing == 'StandardScaler':
            Ds = D[:, gkeep].toarray()
            Ds = StandardScaler(with_mean=True).fit_transform(Ds)
            # Clip z-scores to tame outliers.
            Ds[Ds > 5] = 5
            Ds[Ds < -5] = -5
        else:
            Ds = D[:, gkeep].toarray()
        # Rescale the expression of each kept gene by its current weight.
        D_sub = Ds * (W[gkeep])
        if numcells > 500:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='auto')
        else:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='full')
        if self.distance == 'euclidean':
            g_weighted = Normalizer().fit_transform(g_weighted)
        self.adata.uns['pca_obj'] = pca
        EDM = self.calc_nnm(g_weighted)
        W = self.dispersion_ranking_NN(
            EDM, num_norm_avg=num_norm_avg)
        self.adata.uns['X_processed'] = D_sub
        return W, g_weighted, EDM
    def calc_nnm(self,g_weighted):
        """Builds the binary k-nearest-neighbor adjacency matrix (CSR) from
        the weighted PCA coordinates 'g_weighted'."""
        numcells=g_weighted.shape[0]
        if g_weighted.shape[0] > 8000:
            # Large datasets: use the (approximate) nearest-neighbor helper
            # and scatter the neighbor indices into a sparse matrix.
            nnm, dists = ut.nearest_neighbors(
                g_weighted, n_neighbors=self.k, metric=self.distance)
            EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
            EDM[np.tile(np.arange(nnm.shape[0])[:, None],
                        (1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
            EDM = EDM.tocsr()
        else:
            # Small datasets: exact pairwise distances.
            dist = ut.compute_distances(g_weighted, self.distance)
            nnm = ut.dist_to_nn(dist, self.k)
            EDM = sp.csr_matrix(nnm)
        return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
"""Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
geneID_groups = self.adata.uns['gene_groups']
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs)
def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
"""A (hacky) method for binning groups of genes correlated along the
SAM manifold.
Parameters
----------
number_of_features - int, optional, default None
The number of genes to bin. Capped at 5000 due to memory
considerations.
input_gene - str, optional, default None
If not None, use this gene as the first seed when growing the
correlation bins.
"""
weights = self.adata.var['spatial_dispersions'].values
all_gene_names = np.array(list(self.adata.var_names))
D_avg = self.adata.layers['X_knn_avg']
idx2 = np.argsort(-weights)[:weights[weights > 0].size]
if(number_of_features is None or number_of_features > idx2.size):
number_of_features = idx2.size
if number_of_features > 1000:
number_of_features = 1000
if(input_gene is not None):
input_gene = np.where(all_gene_names == input_gene)[0]
if(input_gene.size == 0):
print(
"Gene note found in the filtered dataset. Note "
"that genes are case sensitive.")
return
seeds = [np.array([input_gene])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(all_gene_names[seeds[i]])
return geneID_groups[0]
else:
seeds = [np.array([idx2[0]])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(
all_gene_names[seeds[i]])
self.adata.uns['gene_groups'] = geneID_groups
return geneID_groups
    def run_tsne(self, X=None, metric='correlation', **kwargs):
        """Wrapper for sklearn's t-SNE implementation.
        See sklearn for the t-SNE documentation. If 'X' is given, returns its
        t-SNE embedding computed with 'metric' (default 'correlation').
        Otherwise, embeds the SAM PCA coordinates using 'self.distance' and
        stores the result in adata.obsm['X_tsne'].
        """
        if(X is not None):
            dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
            return dt
        else:
            # Note: 'metric' is ignored here; the SAM distance is used.
            dt = man.TSNE(metric=self.distance,
                          **kwargs).fit_transform(self.adata.obsm['X_pca'])
            tsne2d = dt
            self.adata.obsm['X_tsne'] = tsne2d
def run_umap(self, X=None, metric=None, **kwargs):
"""Wrapper for umap-learn.
See https://github.com/lmcinnes/umap sklearn for the documentation
and source code.
"""
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d
    def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
                      method='gauss', **kwargs):
        """
        Experimental -- running UMAP on the diffusion components
        """
        # NOTE(review): the 'metric' parameter is currently unused; the first
        # neighbors call uses self.distance and the second hard-codes
        # 'euclidean'.
        import scanpy.api as sc
        # Build a kNN graph on 'use_rep', compute the diffusion map, then
        # rebuild the graph in diffusion space before embedding with UMAP.
        sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
                        metric=self.distance,method=method)
        sc.tl.diffmap(self.adata, n_comps=n_comps)
        sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
                        metric='euclidean',method=method)
        # Preserve any existing SAM UMAP embedding before scanpy's
        # sc.tl.umap overwrites adata.obsm['X_umap'].
        if 'X_umap' in self.adata.obsm.keys():
            self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
        sc.tl.umap(self.adata,min_dist=0.1,copy=False)
    def knn_avg(self, nnm=None):
        """Averages expression over each cell's k nearest neighbors and
        stores the smoothed matrix in adata.layers['X_knn_avg'].
        Parameters
        ----------
        nnm - scipy.sparse, optional, default None
            Cell-to-cell nearest-neighbor matrix. If None, uses the stored
            graph in adata.uns['neighbors']['connectivities'].
        """
        if (nnm is None):
            nnm = self.adata.uns['neighbors']['connectivities']
        # Dividing by k makes each row a mean over the cell's neighbors.
        D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])
        self.adata.layers['X_knn_avg'] = D_avg
    def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
                edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
        """Display a scatter plot.
        Displays a scatter plot using the SAM projection or another input
        projection with or without annotations.
        Parameters
        ----------
        projection - ndarray of floats, optional, default None
            An N x 2 matrix, where N is the number of data points. If None,
            use an existing SAM projection (default t-SNE). Can take on values
            'umap' or 'tsne' to specify either the SAM UMAP embedding or
            SAM t-SNE embedding.
        c - ndarray or str, optional, default None
            Colors for each cell in the scatter plot. Can be a vector of
            floats or strings for cell annotations. Can also be a key
            for sam.adata.obs (i.e. 'louvain_clusters').
        axes - matplotlib axis, optional, default None
            Plot output to the specified, existing axes. If None, create new
            figure window.
        cmap - string, optional, default 'rainbow'
            The colormap to use for the input color values.
        colorbar - bool, optional default True
            If True, display a colorbar indicating which values / annotations
            correspond to which color in the scatter plot.
        Keyword arguments -
            All other keyword arguments that can be passed into
            matplotlib.pyplot.scatter can be used.
        """
        if (not PLOTTING):
            print("matplotlib not installed!")
        else:
            # Resolve the 2D coordinates: a named obsm key, the stored
            # UMAP/t-SNE embedding, or a raw N x 2 array.
            if(isinstance(projection, str)):
                try:
                    dt = self.adata.obsm[projection]
                except KeyError:
                    print('Please create a projection first using run_umap or'
                          'run_tsne')
            elif(projection is None):
                try:
                    dt = self.adata.obsm['X_umap']
                except KeyError:
                    try:
                        dt = self.adata.obsm['X_tsne']
                    except KeyError:
                        print("Please create either a t-SNE or UMAP projection"
                              "first.")
                        return
            else:
                dt = projection
            if(axes is None):
                plt.figure()
                axes = plt.gca()
            if(c is None):
                plt.scatter(dt[:, 0], dt[:, 1], s=s,
                            linewidth=linewidth, edgecolor=edgecolor, **kwargs)
            else:
                # A string 'c' may be an adata.obs key; fall through and use
                # it as a literal color otherwise.
                if isinstance(c, str):
                    try:
                        c = self.adata.obs[c].get_values()
                    except KeyError:
                        0 # do nothing
                if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
                        (isinstance(c, np.ndarray) or isinstance(c, list))):
                    # Categorical string annotations: map to integers and
                    # label the colorbar ticks with the original strings.
                    i = ut.convert_annotations(c)
                    ui, ai = np.unique(i, return_index=True)
                    cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)
                    if(colorbar):
                        cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                        cbar.ax.set_yticklabels(c[ai])
                else:
                    # Numeric vector or a plain matplotlib color spec; only
                    # draw a colorbar for array-like inputs.
                    if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                        colorbar = False
                    i = c
                    cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)
                    if(colorbar):
                        plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name)
    def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
        """Clusters cells with DBSCAN on the UMAP embedding (or on 'X'),
        then assigns noise points to the nearest cluster by majority vote
        among their k nearest clustered neighbors.
        If 'X' is None, results are stored in adata.obs['density_clusters'];
        otherwise the label vector is returned.
        """
        from sklearn.cluster import DBSCAN
        if X is None:
            X = self.adata.obsm['X_umap']
            save = True
        else:
            save = False
        cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)
        # DBSCAN labels noise points -1; reassign them below.
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            # Count clustered neighbors per cluster and take the majority.
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
            cl[idx1] = np.argmax(nnmc, axis=1)
        if save:
            self.adata.obs['density_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def louvain_clustering(self, X=None, res=1, method='modularity'):
        """Runs Louvain clustering using the vtraag implementation. Assumes
        that 'louvain' optional dependency is installed.
        Parameters
        ----------
        res - float, optional, default 1
            The resolution parameter which tunes the number of clusters Louvain
            finds.
        method - str, optional, default 'modularity'
            Can be 'modularity' or 'significance', which are two different
            optimizing funcitons in the Louvain algorithm.
        """
        if X is None:
            X = self.adata.uns['neighbors']['connectivities']
            save = True
        else:
            if not sp.isspmatrix_csr(X):
                X = sp.csr_matrix(X)
            save = False
        import igraph as ig
        import louvain
        # X.dot(X.T)/k scores cell pairs by shared neighbors; sparse_knn
        # presumably keeps the top-k entries per row — confirm against the
        # utility implementation.
        adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
        sources, targets = adjacency.nonzero()
        weights = adjacency[sources, targets]
        if isinstance(weights, np.matrix):
            weights = weights.A1
        g = ig.Graph(directed=True)
        g.add_vertices(adjacency.shape[0])
        g.add_edges(list(zip(sources, targets)))
        # Older igraph versions may reject edge weights; fall back to an
        # unweighted graph in that case.
        try:
            g.es['weight'] = weights
        except BaseException:
            pass
        if method == 'significance':
            cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
        else:
            cl = louvain.find_partition(
                g,
                louvain.RBConfigurationVertexPartition,
                resolution_parameter=res)
        if save:
            self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
        else:
            return np.array(cl.membership)
    def kmeans_clustering(self, numc, X=None, npcs=15):
        """Performs k-means clustering.
        Parameters
        ----------
        numc - int
            Number of clusters
        npcs - int, optional, default 15
            Number of principal components to use as inpute for k-means
            clustering.
        """
        from sklearn.cluster import KMeans
        if X is None:
            # Project the processed data onto the stored SAM PCA basis.
            D_sub = self.adata.uns['X_processed']
            X = (
                D_sub -
                D_sub.mean(0)).dot(
                self.adata.uns['pca_obj'].components_[
                    :npcs,
                    :].T)
            save = True
        else:
            save = False
        # Each cell is L2-normalized before clustering.
        cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
        if save:
            self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def leiden_clustering(self, X=None, res = 1):
        """Runs Leiden clustering via scanpy. With no 'X', clusters on the
        stored neighbor graph and saves integer labels in
        adata.obs['leiden_clusters']; with an adjacency 'X', saves them in
        adata.obs['leiden_clusters_X']."""
        import scanpy.api as sc
        if X is None:
            sc.tl.leiden(self.adata, resolution = res,
                         key_added='leiden_clusters')
            # Convert scanpy's string categories to integer categories.
            self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
                'leiden_clusters'].get_values().astype('int'))
        else:
            sc.tl.leiden(self.adata, resolution = res, adjacency = X,
                         key_added='leiden_clusters_X')
            self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
                'leiden_clusters_X'].get_values().astype('int'))
    def hdbknn_clustering(self, X=None, k=None, **kwargs):
        """Clusters cells with HDBSCAN on the (normalized) top-15 SAM PCs
        (or on 'X'), then assigns noise points to the majority cluster among
        their k nearest clustered neighbors.
        If 'X' is None, results are stored in adata.obs['hdbknn_clusters'];
        otherwise the label vector is returned.
        """
        import hdbscan
        if X is None:
            #X = self.adata.obsm['X_pca']
            D = self.adata.uns['X_processed']
            X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
            X = Normalizer().fit_transform(X)
            save = True
        else:
            save = False
        if k is None:
            k = 20#self.k
        hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
        cl = hdb.fit_predict(X)
        # HDBSCAN labels noise points -1; reassign them below.
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            # Count clustered neighbors per cluster and take the majority.
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
            cl[idx1] = np.argmax(nnmc, axis=1)
        if save:
            self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
        else:
            return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
"""
Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers
    def identify_marker_genes_ratio(self, labels=None):
        """
        Ranks marker genes for each cluster using a SAM-weighted
        expression-ratio approach (works quite well).
        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.
        Returns
        -------
        markers - dict
            Maps each cluster ID to all genes ranked by the weighted ratio
            score (also stored in adata.uns['marker_genes_ratio']).
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels
        all_gene_names = np.array(list(self.adata.var_names))
        markers={}
        # Total expression per gene over all cells.
        s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
        lblsu=np.unique(lbls)
        for i in lblsu:
            # Total expression per gene within the current cluster.
            d = np.array(self.adata.layers['X_disp']
                         [lbls == i, :].sum(0)).flatten()
            # Score: (in-cluster fraction) * (in-cluster total) * SAM weight.
            rat = np.zeros(d.size)
            rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
                self.adata.var['weights'].values[s > 0]
            x = np.argsort(-rat)
            markers[i] = all_gene_names[x[:]]
        self.adata.uns['marker_genes_ratio'] = markers
        return markers
    def identify_marker_genes_corr(self, labels=None, n_genes=4000):
        """
        Ranking marker genes based on their respective magnitudes in the
        correlation dot products with cluster-specific reference expression
        profiles.
        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.
        n_genes - int, optional, default 4000
            By default, computes correlations on the top 4000 SAM-weighted genes.
        Returns
        -------
        markers - dict
            Maps each cluster ID to the top-weighted genes ranked by
            correlation score (also stored in adata.uns['marker_genes_corr']).
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels
        w=self.adata.var['weights'].values
        s = StandardScaler()
        # Standardize the top-weighted genes and rescale by their weights.
        idxg = np.argsort(-w)[:n_genes]
        y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
        all_gene_names = np.array(list(self.adata.var_names))[idxg]
        markers = {}
        lblsu=np.unique(lbls)
        for i in lblsu:
            Gcells = np.array(list(self.adata.obs_names[lbls==i]))
            z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
            # Correlate each in-cluster cell's profile against the cluster's
            # mean (reference) profile; average over cells per gene.
            m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
            ref = z1.mean(0)
            ref = (ref-ref.mean())/ref.std()
            g2 = (m1*ref).mean(0)
            markers[i] = all_gene_names[np.argsort(-g2)]
        self.adata.uns['marker_genes_corr'] = markers
        return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.load_annotations | python | def load_annotations(self, aname, sep=','):
ann = pd.read_csv(aname)
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
if(ann.shape[1] > 1):
ann = pd.read_csv(aname, index_col=0, sep=sep)
if(ann.shape[0] != all_cell_names.size):
ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
else:
if(ann.shape[0] != all_cell_names.size):
ann = pd.read_csv(aname, header=None, sep=sep)
ann.index = np.array(list(ann.index.astype('<U100')))
ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
ann2 = np.array(list(ann.values.flatten()))
self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
self.adata.obs['annotations'] = pd.Categorical(ann1) | Loads cell annotations.
        Loads the cell annotations specified by the 'aname' path.
Parameters
----------
aname - string
The path to the annotations file. First column should be cell IDs
and second column should be the desired annotations. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L460-L489 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
    def __init__(self, counts=None, annotations=None):
        """Builds adata_raw (and a working copy, adata) from one of the
        accepted 'counts' forms; see the class docstring for details.
        Parameters
        ----------
        counts - tuple/list (data, gene IDs, cell IDs), pandas.DataFrame
            (cells x genes), anndata.AnnData, or None
            Preloaded expression data. If None, no data is loaded here.
        annotations - array-like, optional, default None
            Per-cell annotations stored in adata_raw.obs['annotations'].
        """
        if isinstance(counts, tuple) or isinstance(counts, list):
            raw_data, all_gene_names, all_cell_names = counts
            if isinstance(raw_data, np.ndarray):
                raw_data = sp.csr_matrix(raw_data)
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        elif isinstance(counts, pd.DataFrame):
            raw_data = sp.csr_matrix(counts.values)
            all_gene_names = np.array(list(counts.columns.values))
            all_cell_names = np.array(list(counts.index.values))
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        elif isinstance(counts, AnnData):
            all_cell_names=np.array(list(counts.obs_names))
            all_gene_names=np.array(list(counts.var_names))
            self.adata_raw = counts
        elif counts is not None:
            raise Exception(
                "\'counts\' must be either a tuple/list of "
                "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
                "cells x genes")
        if(annotations is not None):
            annotations = np.array(list(annotations))
            if counts is not None:
                self.adata_raw.obs['annotations'] = pd.Categorical(annotations)
        if(counts is not None):
            # Deduplicate IDs before copying so adata and adata_raw agree.
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()
            self.adata = self.adata_raw.copy()
            # 'X_disp' holds the expression used for display/averaging.
            self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                    include_genes=None, exclude_genes=None,
                    include_cells=None, exclude_cells=None,
                    norm='log', min_expression=1, thresh=0.01,
                    filter_genes=True):
    """Log-normalizes and filters the expression data.
    Parameters
    ----------
    div : float, optional, default 1
        The factor by which the gene expression will be divided prior to
        log normalization.
    downsample : float, optional, default 0
        The factor by which to randomly downsample the data. If 0, the
        data will not be downsampled.
    sum_norm : str or float, optional, default None
        If a float, the total number of transcripts in each cell will be
        normalized to this value prior to normalization and filtering.
        Otherwise, nothing happens. If 'cell_median', each cell is
        normalized to have the median total read count per cell. If
        'gene_median', each gene is normalized to have the median total
        read count per gene.
    norm : str, optional, default 'log'
        If 'log', log-normalizes the expression data. If 'ftt', applies the
        Freeman-Tukey variance-stabilization transformation. If
        'multinomial', applies the Pearson-residual transformation (this is
        experimental and should only be used for raw, un-normalized UMI
        datasets). If None, the data is not normalized.
    include_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to keep.
        All other genes will be filtered out. Gene names are case-
        sensitive.
    exclude_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to
        exclude. These genes will be filtered out. Gene names are case-
        sensitive.
    include_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to keep.
        All other cells will be filtered out. Cell names are
        case-sensitive.
    exclude_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to exclude.
        These cells will be filtered out. Cell names are
        case-sensitive.
    min_expression : float, optional, default 1
        The threshold above which a gene is considered
        expressed. Gene expression values less than 'min_expression' are
        set to zero.
    thresh : float, optional, default 0.01
        Keep genes expressed in greater than 'thresh'*100 % of cells and
        less than (1-'thresh')*100 % of cells, where a gene is considered
        expressed if its expression value exceeds 'min_expression'.
    filter_genes : bool, optional, default True
        Setting this to False turns off filtering operations aside from
        removing genes with zero expression across all cells. Genes passed
        in exclude_genes or not passed in include_genes will still be
        filtered.
    """
    # load data
    try:
        D = self.adata_raw.X
        self.adata = self.adata_raw.copy()
    except AttributeError:
        print('No data loaded')
        # Bug fix: bail out here. Previously execution fell through and
        # crashed with a NameError on 'D' a few lines below.
        return
    # filter cells: intersect include/exclude/downsample selections
    cell_names = np.array(list(self.adata_raw.obs_names))
    idx_cells = np.arange(D.shape[0])
    if include_cells is not None:
        include_cells = np.array(list(include_cells))
        idx2 = np.where(np.in1d(cell_names, include_cells))[0]
        idx_cells = np.array(list(set(idx2) & set(idx_cells)))
    if exclude_cells is not None:
        exclude_cells = np.array(list(exclude_cells))
        idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                invert=True))[0]
        idx_cells = np.array(list(set(idx4) & set(idx_cells)))
    if downsample > 0:
        numcells = int(D.shape[0] / downsample)
        rand_ind = np.random.choice(np.arange(D.shape[0]),
                                    size=numcells, replace=False)
        idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
    else:
        numcells = D.shape[0]
    mask_cells = np.zeros(D.shape[0], dtype='bool')
    mask_cells[idx_cells] = True
    self.adata = self.adata_raw[mask_cells, :].copy()
    D = self.adata.X
    # ensure a float32 CSR matrix with sorted indices
    if isinstance(D, np.ndarray):
        D = sp.csr_matrix(D, dtype='float32')
    else:
        D = D.astype('float32')
    D.sort_indices()
    if D.getformat() == 'csc':
        D = D.tocsr()
    # sum-normalize (skipped for 'multinomial', which normalizes itself)
    if (sum_norm == 'cell_median' and norm != 'multinomial'):
        s = D.sum(1).A.flatten()
        sum_norm = np.median(s)
        D = D.multiply(1 / s[:, None] * sum_norm).tocsr()
    elif (sum_norm == 'gene_median' and norm != 'multinomial'):
        s = D.sum(0).A.flatten()
        sum_norm = np.median(s)
        s[s == 0] = 1
        D = D.multiply(1 / s[None, :] * sum_norm).tocsr()
    elif sum_norm is not None and norm != 'multinomial':
        D = D.multiply(1 / D.sum(1).A.flatten()[:,
                       None] * sum_norm).tocsr()
    # normalize
    self.adata.X = D
    if norm is None:
        D.data[:] = (D.data / div)
    elif norm.lower() == 'log':
        D.data[:] = np.log2(D.data / div + 1)
    elif norm.lower() == 'ftt':
        # Freeman-Tukey transform: sqrt(x) + sqrt(x + 1)
        D.data[:] = np.sqrt(D.data / div) + np.sqrt(D.data / div + 1)
    elif norm.lower() == 'multinomial':
        # Pearson residuals under a multinomial null model
        ni = D.sum(1).A.flatten()  # total counts per cell
        pj = (D.sum(0) / D.sum()).A.flatten()  # gene fraction of all counts
        col = D.indices
        row = []
        for i in range(D.shape[0]):
            row.append(i * np.ones(D.indptr[i + 1] - D.indptr[i]))
        row = np.concatenate(row).astype('int32')
        mu = sp.coo_matrix((ni[row] * pj[col], (row, col))).tocsr()
        mu2 = mu.copy()
        mu2.data[:] = mu2.data**2
        mu2 = mu2.multiply(1 / ni[:, None])
        mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
        self.adata.X = mu
        # keep a log-normalized copy of D for display/filtering below
        if sum_norm is None:
            sum_norm = np.median(ni)
        D = D.multiply(1 / ni[:, None] * sum_norm).tocsr()
        D.data[:] = np.log2(D.data / div + 1)
    else:
        D.data[:] = (D.data / div)
    # zero-out low-expressed genes
    idx = np.where(D.data <= min_expression)[0]
    D.data[idx] = 0
    # filter genes: intersect include/exclude/expression-fraction selections
    gene_names = np.array(list(self.adata.var_names))
    idx_genes = np.arange(D.shape[1])
    if include_genes is not None:
        include_genes = np.array(list(include_genes))
        idx = np.where(np.in1d(gene_names, include_genes))[0]
        idx_genes = np.array(list(set(idx) & set(idx_genes)))
    if exclude_genes is not None:
        exclude_genes = np.array(list(exclude_genes))
        idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                invert=True))[0]
        idx_genes = np.array(list(set(idx3) & set(idx_genes)))
    if filter_genes:
        # count the cells expressing each gene
        a, ct = np.unique(D.indices, return_counts=True)
        c = np.zeros(D.shape[1])
        c[a] = ct
        keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                       c / D.shape[0] <= 1 - thresh))[0]
        idx_genes = np.array(list(set(keep) & set(idx_genes)))
    mask_genes = np.zeros(D.shape[1], dtype='bool')
    mask_genes[idx_genes] = True
    # zero out filtered genes rather than dropping columns
    self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
    self.adata.X.eliminate_zeros()
    self.adata.var['mask_genes'] = mask_genes
    if norm == 'multinomial':
        self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
        self.adata.layers['X_disp'].eliminate_zeros()
    else:
        self.adata.layers['X_disp'] = self.adata.X
def load_data(self, filename, transpose=True,
              save_sparse_file='h5ad', sep=',', **kwargs):
    """Loads the specified data file. The file can be a table of
    read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
    as columns by default. The file can also be a pickle file (output from
    'save_sparse_data') or an h5ad file (output from 'save_anndata').
    This function that loads the file specified by 'filename'.
    Parameters
    ----------
    filename - string
        The path to the tabular raw expression counts file.
    sep - string, optional, default ','
        The delimeter used to read the input data table. By default
        assumes the input table is delimited by commas.
    save_sparse_file - str, optional, default 'h5ad'
        If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
        (the native AnnData file format) to the same folder as the original
        data for faster loading in the future. If 'p', pickles the sparse
        data structure, cell names, and gene names in the same folder as
        the original data for faster loading in the future.
    transpose - bool, optional, default True
        By default, assumes file is (genes x cells). Set this to False if
        the file has dimensions (cells x genes).
    """
    # dispatch on file extension: pickle ('.p'), h5ad, or delimited table
    if filename.split('.')[-1] == 'p':
        # pickled (data, cell_names, gene_names) from save_sparse_data
        raw_data, all_cell_names, all_gene_names = (
            pickle.load(open(filename, 'rb')))
        if(transpose):
            raw_data = raw_data.T
            # transposing CSC yields CSR and vice versa; ensure CSR
            if raw_data.getformat() == 'csc':
                print("Converting sparse matrix to csr format...")
                raw_data = raw_data.tocsr()
        # data is already in a fast-loading format; don't re-save
        save_sparse_file = None
    elif filename.split('.')[-1] != 'h5ad':
        # delimited table of counts
        df = pd.read_csv(filename, sep=sep, index_col=0)
        if(transpose):
            dataset = df.T
        else:
            dataset = df
        raw_data = sp.csr_matrix(dataset.values)
        all_cell_names = np.array(list(dataset.index.values))
        all_gene_names = np.array(list(dataset.columns.values))
    if filename.split('.')[-1] != 'h5ad':
        self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                 var={'var_names': all_gene_names})
        # make duplicated IDs unique so AnnData indexing is unambiguous
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = raw_data
    else:
        # native AnnData file
        self.adata_raw = anndata.read_h5ad(filename, **kwargs)
        self.adata = self.adata_raw.copy()
        if 'X_disp' not in list(self.adata.layers.keys()):
            self.adata.layers['X_disp'] = self.adata.X
        save_sparse_file = None
    # optionally cache a fast-loading copy next to the original file
    if(save_sparse_file == 'p'):
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_sparse_data(path + new_sparse_file + '_sparse.p')
    elif(save_sparse_file == 'h5ad'):
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
    """Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
    Pickle file.
    Parameters
    ----------
    fname - string
        The filename of the output file.
    """
    # stored transposed (genes x cells); CSC on disk so that load_data's
    # transpose yields an efficient CSR matrix
    data = self.adata_raw.X.T
    if data.getformat() == 'csr':
        data = data.tocsc()
    cell_names = np.array(list(self.adata_raw.obs_names))
    gene_names = np.array(list(self.adata_raw.var_names))
    # Bug fix: use a context manager so the file handle is closed
    # deterministically (the original left the open() handle dangling).
    with open(fname, 'wb') as f:
        pickle.dump((data, cell_names, gene_names), f)
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
    """Saves a SAM AnnData attribute to a .h5ad file (AnnData's native
    file format).
    Parameters
    ----------
    fname - string
        The filename of the output file.
    data - string, optional, default 'adata_raw'
        Name of the AnnData attribute on this object to write
        (e.g. 'adata_raw' or 'adata').
    **kwargs -
        Passed through to AnnData.write_h5ad.
    """
    # look the attribute up by name so either AnnData object can be saved
    x = self.__dict__[data]
    x.write_h5ad(fname, **kwargs)
def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
    """Computes the spatial dispersion factors for each gene.
    Parameters
    ----------
    nnm - scipy.sparse, float
        Square cell-to-cell nearest-neighbor matrix.
    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This ensures
        that outlier genes do not significantly skew the weight
        distribution.
    Returns:
    -------
    weights - ndarray, float
        The vector of gene weights.
    """
    # smooth expression over the kNN graph, then score each gene by the
    # Fano factor (variance / mean) of its smoothed profile
    self.knn_avg(nnm)
    D_avg = self.adata.layers['X_knn_avg']
    mu, var = sf.mean_variance_axis(D_avg, axis=0)
    dispersions = np.zeros(var.size)
    dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
    self.adata.var['spatial_dispersions'] = dispersions.copy()
    # clip at the mean of the top 'num_norm_avg' dispersions so a few
    # outlier genes cannot dominate, then map to [0, 1] via sqrt
    ma = np.sort(dispersions)[-num_norm_avg:].mean()
    dispersions[dispersions >= ma] = ma
    weights = ((dispersions / dispersions.max())**0.5).flatten()
    self.adata.var['weights'] = weights
    return weights
def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
    """Computes the contribution of the gene IDs in 'genes' to each
    principal component (PC) of the filtered expression data as the mean of
    the absolute value of the corresponding gene loadings. High values
    correspond to PCs that are highly correlated with the features in
    'genes'. These PCs can then be regressed out of the data using
    'regress_genes'.
    Parameters
    ----------
    genes - numpy.array or list
        Genes for which contribution to each PC will be calculated.
    npcs - int, optional, default None
        How many PCs to calculate when computing PCA of the filtered and
        log-transformed expression data. If None, calculate all PCs.
    plot - bool, optional, default False
        If True, plot the scores reflecting how correlated each PC is with
        genes of interest. Otherwise, do not plot anything.
    Returns:
    -------
    x - numpy.array
        Scores reflecting how correlated each PC is with the genes of
        interest (ordered by decreasing eigenvalues). None if 'genes'
        was not provided.
    """
    from sklearn.decomposition import PCA
    n_components = self.adata.X.shape[0] if npcs is None else npcs
    pca = PCA(n_components=n_components)
    projected = pca.fit_transform(self.adata.X.toarray())
    # cache the fit so regress_genes can reuse it
    self.regression_pca = pca
    self.regression_pcs = projected
    gene_names = np.array(list(self.adata.var_names))
    if genes is None:
        return
    hit_idx = np.where(np.in1d(gene_names, genes))[0]
    # mean absolute loading of the selected genes on each PC
    scores = np.abs(pca.components_[:, hit_idx]).mean(1)
    if plot:
        plt.figure()
        plt.plot(scores)
    return scores
def regress_genes(self, PCs):
    """Regress out the principal components in 'PCs' from the filtered
    expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
    been previously called.
    Parameters
    ----------
    PCs - int, numpy.array, list
        The principal components to regress out of the expression data.
    """
    ind = np.array([PCs]).flatten()
    pcs = self.regression_pcs[:, ind]
    loadings = self.regression_pca.components_[ind, :]
    try:
        # subtract the weighted reconstruction when gene weights exist
        residual = self.adata.X.toarray() - pcs.dot(
            loadings * self.adata.var['weights'].values)
    except BaseException:
        # no weights available -- fall back to the unweighted loadings
        residual = self.adata.X.toarray() - pcs.dot(loadings)
    self.adata.X = sp.csr_matrix(residual)
def run(self,
        max_iter=10,
        verbose=True,
        projection='umap',
        stopping_condition=5e-3,
        num_norm_avg=50,
        k=20,
        distance='correlation',
        preprocessing='Normalizer',
        proj_kwargs={}):
    """Runs the Self-Assembling Manifold algorithm.
    Parameters
    ----------
    k - int, optional, default 20
        The number of nearest neighbors to identify for each cell.
    distance : string, optional, default 'correlation'
        The distance metric to use when constructing cell distance
        matrices. Can be any of the distance metrics supported by
        sklearn's 'pdist'.
    max_iter - int, optional, default 10
        The maximum number of iterations SAM will run.
    stopping_condition - float, optional, default 5e-3
        The stopping condition threshold for the RMSE between gene weights
        in adjacent iterations.
    verbose - bool, optional, default True
        If True, the iteration number and error between gene weights in
        adjacent iterations will be displayed.
    projection - str, optional, default 'umap'
        If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
        embedding. Otherwise, no embedding will be generated.
    preprocessing - str, optional, default 'Normalizer'
        If 'Normalizer', use sklearn.preprocessing.Normalizer, which
        normalizes expression data prior to PCA such that each cell has
        unit L2 norm. If 'StandardScaler', use
        sklearn.preprocessing.StandardScaler, which normalizes expression
        data prior to PCA such that each gene has zero mean and unit
        variance. Otherwise, do not normalize the expression data. We
        recommend using 'StandardScaler' for large datasets and
        'Normalizer' otherwise.
    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This prevents
        genes with large spatial dispersions from skewing the distribution
        of weights.
    proj_kwargs - dict, optional, default {}
        A dictionary of keyword arguments to pass to the projection
        functions.
    """
    self.distance = distance
    D = self.adata.X
    self.k = k
    # clamp k to [5, 100] and below the number of cells
    if(self.k < 5):
        self.k = 5
    elif(self.k > 100):
        self.k = 100
    if(self.k > D.shape[0] - 1):
        self.k = D.shape[0] - 2
    numcells = D.shape[0]
    # heuristics: use fewer genes and PCs for larger datasets
    n_genes = 8000
    if numcells > 3000 and n_genes > 3000:
        n_genes = 3000
    elif numcells > 2000 and n_genes > 4500:
        n_genes = 4500
    elif numcells > 1000 and n_genes > 6000:
        n_genes = 6000
    elif n_genes > 8000:
        n_genes = 8000
    npcs = None
    if npcs is None and numcells > 3000:
        npcs = 150
    elif npcs is None and numcells > 2000:
        npcs = 250
    elif npcs is None and numcells > 1000:
        npcs = 350
    elif npcs is None:
        npcs = 500
    tinit = time.time()
    # seed with a random kNN graph: each cell linked to itself plus
    # (k - 1) randomly chosen cells
    edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
    nums = np.arange(edm.shape[1])
    RINDS = np.random.randint(
        0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                       (self.k - 1)))
    RINDS = np.hstack((nums[:, None], RINDS))
    edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
    edm = edm.tocsr()
    print('RUNNING SAM')
    # initial gene weights from the random graph
    W = self.dispersion_ranking_NN(
        edm, num_norm_avg=1)
    old = np.zeros(W.size)
    new = W
    i = 0
    err = ((new - old)**2).mean()**0.5
    if max_iter < 5:
        max_iter = 5
    nnas = num_norm_avg
    # keep the graph and weight history from every iteration
    self.Ns = [edm]
    self.Ws = [W]
    # iterate until the RMSE between successive weight vectors drops
    # below the stopping condition (or max_iter is reached)
    while (i < max_iter and err > stopping_condition):
        conv = err
        if(verbose):
            print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))
        i += 1
        old = new
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        new = W
        err = ((new - old)**2).mean()**0.5
        self.Ns.append(EDM)
        self.Ws.append(W)
    # one final pass with the converged weights
    W, wPCA_data, EDM, = self.calculate_nnm(
        D, W, n_genes, preprocessing, npcs, numcells, nnas)
    self.Ns.append(EDM)
    all_gene_names = np.array(list(self.adata.var_names))
    indices = np.argsort(-W)
    ranked_genes = all_gene_names[indices]
    # populate gene groups (stored in adata.uns['gene_groups'])
    self.corr_bin_genes(number_of_features=1000)
    self.adata.uns['ranked_genes'] = ranked_genes
    self.adata.obsm['X_pca'] = wPCA_data
    self.adata.uns['neighbors'] = {}
    self.adata.uns['neighbors']['connectivities'] = EDM
    if(projection == 'tsne'):
        print('Computing the t-SNE embedding...')
        self.run_tsne(**proj_kwargs)
    elif(projection == 'umap'):
        print('Computing the UMAP embedding...')
        self.run_umap(**proj_kwargs)
    elif(projection == 'diff_umap'):
        print('Computing the diffusion UMAP embedding...')
        self.run_diff_umap(**proj_kwargs)
    elapsed = time.time() - tinit
    if verbose:
        print('Elapsed time: ' + str(elapsed) + ' seconds')
def calculate_nnm(
        self,
        D,
        W,
        n_genes,
        preprocessing,
        npcs,
        numcells,
        num_norm_avg):
    # One SAM iteration: select the top-weighted genes, scale/normalize
    # them, weight the expression by W, run weighted PCA, build the kNN
    # graph in PC space, and recompute gene weights from the spatial
    # dispersions on that graph.
    if(n_genes is None):
        gkeep = np.arange(W.size)
    else:
        # indices (sorted) of the n_genes highest-weighted genes
        gkeep = np.sort(np.argsort(-W)[:n_genes])
    if preprocessing == 'Normalizer':
        Ds = D[:, gkeep].toarray()
        Ds = Normalizer().fit_transform(Ds)
    elif preprocessing == 'StandardScaler':
        Ds = D[:, gkeep].toarray()
        Ds = StandardScaler(with_mean=True).fit_transform(Ds)
        # clip extreme z-scores
        Ds[Ds > 5] = 5
        Ds[Ds < -5] = -5
    else:
        Ds = D[:, gkeep].toarray()
    # weight each gene by its SAM weight before PCA
    D_sub = Ds * (W[gkeep])
    if numcells > 500:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='auto')
    else:
        # full SVD is affordable for small datasets
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='full')
    if self.distance == 'euclidean':
        # unit-normalize rows so euclidean distance behaves like cosine
        g_weighted = Normalizer().fit_transform(g_weighted)
    self.adata.uns['pca_obj'] = pca
    EDM = self.calc_nnm(g_weighted)
    W = self.dispersion_ranking_NN(
        EDM, num_norm_avg=num_norm_avg)
    self.adata.uns['X_processed'] = D_sub
    return W, g_weighted, EDM
def calc_nnm(self, g_weighted):
    """Build the sparse k-nearest-neighbor adjacency matrix of the cells
    from their weighted-PCA coordinates `g_weighted`."""
    n_obs = g_weighted.shape[0]
    if n_obs > 8000:
        # large datasets: approximate nearest neighbors
        knn_idx, _ = ut.nearest_neighbors(
            g_weighted, n_neighbors=self.k, metric=self.distance)
        adj = sp.coo_matrix((n_obs, n_obs), dtype='i').tolil()
        rows = np.repeat(np.arange(knn_idx.shape[0]), knn_idx.shape[1])
        adj[rows, knn_idx.flatten()] = 1
        adj = adj.tocsr()
    else:
        # small datasets: exact pairwise distance matrix
        pairwise = ut.compute_distances(g_weighted, self.distance)
        adj = sp.csr_matrix(ut.dist_to_nn(pairwise, self.k))
    return adj
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
    """Plot representative expression patterns of correlated gene groups.

    With no `group`, one plot is produced per correlated group (its
    top-ranked gene). With a `group` index, the top `n_genes` genes of
    that group are plotted.

    Parameters
    ----------
    group - int, optional, default None
        Index of the correlated group to display. If None, display the
        top-ranked gene of every group.
    n_genes - int, optional, default 5
        Number of top-ranked genes shown when `group` is specified.
    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    gene_groups = self.adata.uns['gene_groups']
    if group is None:
        for members in gene_groups:
            self.show_gene_expression(members[0], **kwargs)
    else:
        chosen = gene_groups[group]
        for rank in range(n_genes):
            self.show_gene_expression(chosen[rank], **kwargs)
def plot_correlated_genes(
        self,
        name,
        n_genes=5,
        number_of_features=1000,
        **kwargs):
    """Plots gene expression patterns correlated with the input gene.
    Parameters
    ----------
    name - string
        The name of the gene with respect to which correlated gene
        expression patterns will be displayed.
    n_genes - int, optional, default 5
        The number of top ranked correlated genes to display.
    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    gene_pool = np.array(list(self.adata.var_names))
    if (gene_pool == name).sum() == 0:
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return
    # grow the correlation bin seeded by this gene
    correlated = self.corr_bin_genes(
        input_gene=name,
        number_of_features=number_of_features)
    # element 0 is the seed gene itself; show up to n_genes others
    limit = min(n_genes + 1, correlated.size)
    for rank in range(1, limit):
        self.show_gene_expression(correlated[rank], **kwargs)
    return correlated[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
    """A (hacky) method for binning groups of genes correlated along the
    SAM manifold.
    Parameters
    ----------
    number_of_features - int, optional, default None
        The number of genes to bin. Capped at 1000 due to memory
        considerations.
    input_gene - str, optional, default None
        If not None, use this gene as the first seed when growing the
        correlation bins.

    Returns
    -------
    If 'input_gene' is given, the gene names of the bin seeded by that
    gene. Otherwise, the list of all bins (also stored in
    adata.uns['gene_groups']).
    """
    weights = self.adata.var['spatial_dispersions'].values
    all_gene_names = np.array(list(self.adata.var_names))
    D_avg = self.adata.layers['X_knn_avg']
    # genes ranked by decreasing spatial dispersion (positive only)
    idx2 = np.argsort(-weights)[:weights[weights > 0].size]
    if(number_of_features is None or number_of_features > idx2.size):
        number_of_features = idx2.size
    if number_of_features > 1000:
        number_of_features = 1000
    if(input_gene is not None):
        input_gene = np.where(all_gene_names == input_gene)[0]
        if(input_gene.size == 0):
            # Bug fix: typo 'note' -> 'not' in the user-facing message.
            print(
                "Gene not found in the filtered dataset. Note "
                "that genes are case sensitive.")
            return
        seeds = [np.array([input_gene])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            # attach gene i to the seed it correlates with most, if that
            # beats its mean positive correlation; otherwise new seed
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                        > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(all_gene_names[seeds[i]])
        # only the bin containing the input gene is returned
        return geneID_groups[0]
    else:
        # seed with the highest-dispersion gene and grow bins greedily
        seeds = [np.array([idx2[0]])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                        > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(
                all_gene_names[seeds[i]])
        self.adata.uns['gene_groups'] = geneID_groups
        return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
    """Wrapper for sklearn's t-SNE implementation.
    See sklearn for the t-SNE documentation. All arguments are the same
    with the exception that 'metric' is set to 'precomputed' by default,
    implying that this function expects a distance matrix by default.
    """
    if X is not None:
        # explicit input: embed it and hand the result back
        return man.TSNE(metric=metric, **kwargs).fit_transform(X)
    # default: embed the SAM weighted PCA and store the result
    embedding = man.TSNE(
        metric=self.distance, **kwargs).fit_transform(
        self.adata.obsm['X_pca'])
    self.adata.obsm['X_tsne'] = embedding
def run_umap(self, X=None, metric=None, **kwargs):
    """Wrapper for umap-learn.
    See https://github.com/lmcinnes/umap sklearn for the documentation
    and source code.
    """
    import umap as umap
    if metric is None:
        metric = self.distance
    # same reducer configuration for both code paths
    reducer = umap.UMAP(metric=metric, **kwargs)
    if X is not None:
        # explicit input: embed it and hand the result back
        return reducer.fit_transform(X)
    # default: embed the SAM weighted PCA and store the result
    self.adata.obsm['X_umap'] = reducer.fit_transform(
        self.adata.obsm['X_pca'])
def run_diff_umap(self, use_rep='X_pca', metric='euclidean', n_comps=15,
                  method='gauss', **kwargs):
    """
    Experimental -- running UMAP on the diffusion components
    """
    # NOTE(review): the 'metric' parameter is currently unused -- the
    # first neighbors call uses self.distance and the second hard-codes
    # 'euclidean'; confirm whether it should be wired through.
    import scanpy.api as sc
    # neighbor graph on the chosen representation, then diffusion maps
    sc.pp.neighbors(self.adata, use_rep=use_rep, n_neighbors=self.k,
                    metric=self.distance, method=method)
    sc.tl.diffmap(self.adata, n_comps=n_comps)
    # rebuild the neighbor graph in diffusion-component space
    sc.pp.neighbors(self.adata, use_rep='X_diffmap', n_neighbors=self.k,
                    metric='euclidean', method=method)
    # preserve an existing UMAP embedding before scanpy overwrites it
    if 'X_umap' in self.adata.obsm.keys():
        self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
    sc.tl.umap(self.adata, min_dist=0.1, copy=False)
def knn_avg(self, nnm=None):
    """Average each cell's display expression over its k nearest
    neighbors and store the result in adata.layers['X_knn_avg'].

    Parameters
    ----------
    nnm - scipy.sparse, optional, default None
        Cell-to-cell adjacency matrix. If None, the stored SAM neighbor
        graph (adata.uns['neighbors']['connectivities']) is used.
    """
    if nnm is None:
        nnm = self.adata.uns['neighbors']['connectivities']
    self.adata.layers['X_knn_avg'] = (nnm / self.k).dot(
        self.adata.layers['X_disp'])
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
            edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
    """Display a scatter plot.
    Displays a scatter plot using the SAM projection or another input
    projection with or without annotations.
    Parameters
    ----------
    projection - ndarray of floats, optional, default None
        An N x 2 matrix, where N is the number of data points. If None,
        use an existing SAM projection (default t-SNE). Can take on values
        'umap' or 'tsne' to specify either the SAM UMAP embedding or
        SAM t-SNE embedding.
    c - ndarray or str, optional, default None
        Colors for each cell in the scatter plot. Can be a vector of
        floats or strings for cell annotations. Can also be a key
        for sam.adata.obs (i.e. 'louvain_clusters').
    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.
    cmap - string, optional, default 'rainbow'
        The colormap to use for the input color values.
    colorbar - bool, optional default True
        If True, display a colorbar indicating which values / annotations
        correspond to which color in the scatter plot.
    Keyword arguments -
        All other keyword arguments that can be passed into
        matplotlib.pyplot.scatter can be used.
    """
    if (not PLOTTING):
        print("matplotlib not installed!")
    else:
        # resolve the 2D coordinates to plot
        if(isinstance(projection, str)):
            try:
                dt = self.adata.obsm[projection]
            except KeyError:
                print('Please create a projection first using run_umap or'
                      'run_tsne')
                # NOTE(review): there is no return here, so 'dt' stays
                # unbound and the scatter call below raises NameError;
                # confirm whether an early return was intended.
        elif(projection is None):
            # fall back to UMAP, then t-SNE
            try:
                dt = self.adata.obsm['X_umap']
            except KeyError:
                try:
                    dt = self.adata.obsm['X_tsne']
                except KeyError:
                    print("Please create either a t-SNE or UMAP projection"
                          "first.")
                    return
        else:
            dt = projection
        if(axes is None):
            plt.figure()
            axes = plt.gca()
        if(c is None):
            plt.scatter(dt[:, 0], dt[:, 1], s=s,
                        linewidth=linewidth, edgecolor=edgecolor, **kwargs)
        else:
            # a string color may name a column of adata.obs
            if isinstance(c, str):
                try:
                    c = self.adata.obs[c].get_values()
                except KeyError:
                    0  # do nothing
            if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
                    (isinstance(c, np.ndarray) or isinstance(c, list))):
                # categorical string labels: map to integer codes
                i = ut.convert_annotations(c)
                ui, ai = np.unique(i, return_index=True)
                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    # label colorbar ticks with the original annotations
                    cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                    cbar.ax.set_yticklabels(c[ai])
            else:
                # numeric colors (or a scalar, which gets no colorbar)
                if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                    colorbar = False
                i = c
                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
    """Display a gene's expressions.
    Displays a scatter plot using the SAM projection or another input
    projection with a particular gene's expressions overlaid.
    Parameters
    ----------
    gene - string
        a case-sensitive string indicating the gene expression pattern
        to display.
    avg - bool, optional, default True
        If True, the plots use the k-nearest-neighbor-averaged expression
        values to smooth out noisy expression patterns and improves
        visualization.
    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.
    **kwargs - all keyword arguments in 'SAM.scatter' are eligible.
    """
    all_gene_names = np.array(list(self.adata.var_names))
    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))
    idx = np.where(all_gene_names == gene)[0]
    name = gene
    if(idx.size == 0):
        # Bug fix: typo 'note' -> 'not' in the user-facing message.
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return
    if(avg):
        a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
        if a.sum() == 0:
            # smoothed values are all zero; fall back to raw log counts
            a = np.log2(self.adata_raw.X[np.in1d(
                all_cell_names, cell_names), :][:,
                idx].toarray().flatten() + 1)
    else:
        # raw log counts restricted to the currently kept cells
        a = np.log2(self.adata_raw.X[np.in1d(
            all_cell_names, cell_names), :][:,
            idx].toarray().flatten() + 1)
    if axes is None:
        plt.figure()
        axes = plt.gca()
    self.scatter(c=a, axes=axes, **kwargs)
    axes.set_title(name)
def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
    # DBSCAN clustering of an embedding (defaults to the stored UMAP);
    # noise points (label -1) are reassigned to the majority cluster
    # among their k nearest clustered neighbors.
    from sklearn.cluster import DBSCAN
    if X is None:
        X = self.adata.obsm['X_umap']
        save = True
    else:
        save = False
    cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)
    idx0 = np.where(cl != -1)[0]  # clustered points
    idx1 = np.where(cl == -1)[0]  # noise points
    if idx1.size > 0 and idx0.size > 0:
        # distances from clustered to noise points
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        # k nearest clustered neighbors of each noise point
        knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # per-cluster neighbor vote counts for each noise point
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
        cl[idx1] = np.argmax(nnmc, axis=1)
    if save:
        self.adata.obs['density_clusters'] = pd.Categorical(cl)
    else:
        return cl
def louvain_clustering(self, X=None, res=1, method='modularity'):
    """Runs Louvain clustering using the vtraag implementation. Assumes
    that 'louvain' optional dependency is installed.
    Parameters
    ----------
    res - float, optional, default 1
        The resolution parameter which tunes the number of clusters Louvain
        finds.
    method - str, optional, default 'modularity'
        Can be 'modularity' or 'significance', which are two different
        optimizing funcitons in the Louvain algorithm.
    """
    if X is None:
        X = self.adata.uns['neighbors']['connectivities']
        save = True
    else:
        if not sp.isspmatrix_csr(X):
            X = sp.csr_matrix(X)
        save = False
    import igraph as ig
    import louvain
    # shared-neighbor adjacency, re-sparsified to k neighbors per cell
    adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
    sources, targets = adjacency.nonzero()
    weights = adjacency[sources, targets]
    if isinstance(weights, np.matrix):
        weights = weights.A1
    # build a directed igraph graph from the adjacency matrix
    g = ig.Graph(directed=True)
    g.add_vertices(adjacency.shape[0])
    g.add_edges(list(zip(sources, targets)))
    try:
        g.es['weight'] = weights
    except BaseException:
        # best effort -- fall back to an unweighted graph
        pass
    if method == 'significance':
        cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
    else:
        cl = louvain.find_partition(
            g,
            louvain.RBConfigurationVertexPartition,
            resolution_parameter=res)
    if save:
        self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
    else:
        return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
    """Performs k-means clustering.
    Parameters
    ----------
    numc - int
        Number of clusters
    npcs - int, optional, default 15
        Number of principal components to use as inpute for k-means
        clustering.
    """
    from sklearn.cluster import KMeans
    if X is None:
        # project the processed data onto the stored PCA components
        D_sub = self.adata.uns['X_processed']
        components = self.adata.uns['pca_obj'].components_[:npcs, :]
        X = (D_sub - D_sub.mean(0)).dot(components.T)
        save = True
    else:
        save = False
    labels = KMeans(n_clusters=numc).fit_predict(
        Normalizer().fit_transform(X))
    if save:
        self.adata.obs['kmeans_clusters'] = pd.Categorical(labels)
    else:
        return labels
def leiden_clustering(self, X=None, res=1):
    """Cluster cells with scanpy's Leiden implementation, storing the
    labels in adata.obs as integer categoricals (under
    'leiden_clusters', or 'leiden_clusters_X' when an explicit
    adjacency matrix X is supplied)."""
    import scanpy.api as sc
    if X is None:
        key = 'leiden_clusters'
        sc.tl.leiden(self.adata, resolution=res, key_added=key)
    else:
        key = 'leiden_clusters_X'
        sc.tl.leiden(self.adata, resolution=res, adjacency=X,
                     key_added=key)
    # scanpy stores string categories; convert them to integers
    self.adata.obs[key] = pd.Categorical(
        self.adata.obs[key].get_values().astype('int'))
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    # HDBSCAN clustering on a 15-PC projection of the processed data
    # (by default); unclustered points (label -1) are reassigned to the
    # majority cluster among their k nearest clustered neighbors.
    import hdbscan
    if X is None:
        #X = self.adata.obsm['X_pca']
        D = self.adata.uns['X_processed']
        X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False
    if k is None:
        k = 20#self.k
    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
    cl = hdb.fit_predict(X)
    idx0 = np.where(cl != -1)[0]  # clustered points
    idx1 = np.where(cl == -1)[0]  # unclustered points
    if idx1.size > 0 and idx0.size > 0:
        # distances from clustered to unclustered points
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # per-cluster neighbor vote counts for each unclustered point
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
        cl[idx1] = np.argmax(nnmc, axis=1)
    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
"""
Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers
    def identify_marker_genes_ratio(self, labels=None):
        """
        Ranks marker genes for each cluster using a SAM-weighted
        expression-ratio approach (works quite well).

        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.

        Returns
        -------
        markers - dict
            Maps each cluster label to gene names ranked from most to least
            cluster-specific. Also stored in adata.uns['marker_genes_ratio'].
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                # Use the first obs column whose name contains '_clusters'.
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels

        all_gene_names = np.array(list(self.adata.var_names))

        markers={}
        # s: total expression of each gene across all cells.
        s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
        lblsu=np.unique(lbls)
        for i in lblsu:
            # d: total expression of each gene within cluster i.
            d = np.array(self.adata.layers['X_disp']
                         [lbls == i, :].sum(0)).flatten()
            rat = np.zeros(d.size)
            # Score = (in-cluster sum)^2 / total sum, scaled by SAM weight;
            # genes with zero total expression keep a score of zero.
            rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
                self.adata.var['weights'].values[s > 0]
            x = np.argsort(-rat)
            markers[i] = all_gene_names[x[:]]

        self.adata.uns['marker_genes_ratio'] = markers

        return markers
    def identify_marker_genes_corr(self, labels=None, n_genes=4000):
        """
        Ranking marker genes based on their respective magnitudes in the
        correlation dot products with cluster-specific reference expression
        profiles.

        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.

        n_genes - int, optional, default 4000
            By default, computes correlations on the top 4000 SAM-weighted genes.

        Returns
        -------
        markers - dict
            Maps each cluster label to gene names ranked by correlation with
            the cluster's mean profile. Also stored in
            adata.uns['marker_genes_corr'].
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                # Use the first obs column whose name contains '_clusters'.
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels

        w=self.adata.var['weights'].values
        s = StandardScaler()
        # Standardize the top n_genes weighted genes and re-apply the weights.
        idxg = np.argsort(-w)[:n_genes]
        y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]

        all_gene_names = np.array(list(self.adata.var_names))[idxg]
        markers = {}
        lblsu=np.unique(lbls)
        for i in lblsu:
            Gcells = np.array(list(self.adata.obs_names[lbls==i]))
            z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
            # Per-cell z-scores within the cluster ...
            m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
            # ... correlated against the standardized cluster mean profile.
            ref = z1.mean(0)
            ref = (ref-ref.mean())/ref.std()
            g2 = (m1*ref).mean(0)
            markers[i] = all_gene_names[np.argsort(-g2)]

        self.adata.uns['marker_genes_corr'] = markers
        return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.dispersion_ranking_NN | python | def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
self.knn_avg(nnm)
D_avg = self.adata.layers['X_knn_avg']
mu, var = sf.mean_variance_axis(D_avg, axis=0)
dispersions = np.zeros(var.size)
dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
self.adata.var['spatial_dispersions'] = dispersions.copy()
ma = np.sort(dispersions)[-num_norm_avg:].mean()
dispersions[dispersions >= ma] = ma
weights = ((dispersions / dispersions.max())**0.5).flatten()
self.adata.var['weights'] = weights
return weights | Computes the spatial dispersion factors for each gene.
Parameters
----------
nnm - scipy.sparse, float
Square cell-to-cell nearest-neighbor matrix.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This ensures
that outlier genes do not significantly skew the weight
distribution.
Returns:
-------
indices - ndarray, int
The indices corresponding to the gene weights sorted in decreasing
order.
weights - ndarray, float
The vector of gene weights. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L491-L532 | [
"def knn_avg(self, nnm=None):\n\n if (nnm is None):\n nnm = self.adata.uns['neighbors']['connectivities']\n\n D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])\n self.adata.layers['X_knn_avg'] = D_avg\n"
] | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
    def __init__(self, counts=None, annotations=None):
        """Build a SAM object from preloaded data.

        See the class docstring for the accepted 'counts' formats. If
        'counts' is None, data must be loaded later via one of the load
        functions.

        Parameters
        ----------
        counts : tuple/list (matrix, gene IDs, cell IDs), pandas.DataFrame
            (cells x genes), or anndata.AnnData, optional, default None
        annotations : array-like of cell annotations, optional, default None
        """
        if isinstance(counts, tuple) or isinstance(counts, list):
            # (matrix, gene IDs, cell IDs); densify-safe conversion to CSR.
            raw_data, all_gene_names, all_cell_names = counts
            if isinstance(raw_data, np.ndarray):
                raw_data = sp.csr_matrix(raw_data)
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        elif isinstance(counts, pd.DataFrame):
            # DataFrame is (cells x genes): index = cells, columns = genes.
            raw_data = sp.csr_matrix(counts.values)
            all_gene_names = np.array(list(counts.columns.values))
            all_cell_names = np.array(list(counts.index.values))
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        elif isinstance(counts, AnnData):
            all_cell_names=np.array(list(counts.obs_names))
            all_gene_names=np.array(list(counts.var_names))
            self.adata_raw = counts
        elif counts is not None:
            raise Exception(
                "\'counts\' must be either a tuple/list of "
                "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
                "cells x genes")

        if(annotations is not None):
            annotations = np.array(list(annotations))
            if counts is not None:
                self.adata_raw.obs['annotations'] = pd.Categorical(annotations)

        if(counts is not None):
            # De-duplicate IDs so AnnData indexing stays unambiguous.
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()

            self.adata = self.adata_raw.copy()
            # 'X_disp' holds the expression values used for display/averaging.
            self.adata.layers['X_disp'] = self.adata.X
    def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                        include_genes=None, exclude_genes=None,
                        include_cells=None, exclude_cells=None,
                        norm='log', min_expression=1, thresh=0.01,
                        filter_genes=True):
        """Log-normalizes and filters the expression data.

        Parameters
        ----------
        div : float, optional, default 1
            The factor by which the gene expression will be divided prior to
            log normalization.

        downsample : float, optional, default 0
            The factor by which to randomly downsample the data. If 0, the
            data will not be downsampled.

        sum_norm : str or float, optional, default None
            If a float, the total number of transcripts in each cell will be
            normalized to this value prior to normalization and filtering.
            Otherwise, nothing happens. If 'cell_median', each cell is
            normalized to have the median total read count per cell. If
            'gene_median', each gene is normalized to have the median total
            read count per gene.

        norm : str, optional, default 'log'
            If 'log', log-normalizes the expression data. If 'ftt', applies the
            Freeman-Tukey variance-stabilization transformation. If
            'multinomial', applies the Pearson-residual transformation (this is
            experimental and should only be used for raw, un-normalized UMI
            datasets). If None, the data is not normalized.

        include_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to keep.
            All other genes will be filtered out. Gene names are case-
            sensitive.

        exclude_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to
            exclude. These genes will be filtered out. Gene names are case-
            sensitive.

        include_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to keep.
            All other cells will be filtered out. Cell names are
            case-sensitive.

        exclude_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to exclude.
            These cells will be filtered out. Cell names are
            case-sensitive.

        min_expression : float, optional, default 1
            The threshold above which a gene is considered
            expressed. Gene expression values less than 'min_expression' are
            set to zero.

        thresh : float, optional, default 0.01
            Keep genes expressed in greater than 'thresh'*100 % of cells and
            less than (1-'thresh')*100 % of cells, where a gene is considered
            expressed if its expression value exceeds 'min_expression'.

        filter_genes : bool, optional, default True
            Setting this to False turns off filtering operations aside from
            removing genes with zero expression across all cells. Genes passed
            in exclude_genes or not passed in include_genes will still be
            filtered.
        """
        # load data
        try:
            D= self.adata_raw.X
            self.adata = self.adata_raw.copy()
        except AttributeError:
            print('No data loaded')

        # filter cells: intersect the kept-cell index set with each
        # include/exclude constraint in turn.
        cell_names = np.array(list(self.adata_raw.obs_names))
        idx_cells = np.arange(D.shape[0])
        if(include_cells is not None):
            include_cells = np.array(list(include_cells))
            idx2 = np.where(np.in1d(cell_names, include_cells))[0]
            idx_cells = np.array(list(set(idx2) & set(idx_cells)))

        if(exclude_cells is not None):
            exclude_cells = np.array(list(exclude_cells))
            idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                    invert=True))[0]
            idx_cells = np.array(list(set(idx4) & set(idx_cells)))

        if downsample > 0:
            # Random subsample of 1/downsample of the cells.
            numcells = int(D.shape[0] / downsample)
            rand_ind = np.random.choice(np.arange(D.shape[0]),
                                        size=numcells, replace=False)

            idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
        else:
            numcells = D.shape[0]

        mask_cells = np.zeros(D.shape[0], dtype='bool')
        mask_cells[idx_cells] = True

        self.adata = self.adata_raw[mask_cells,:].copy()
        D = self.adata.X

        # Ensure a sorted float32 CSR matrix before normalization.
        if isinstance(D,np.ndarray):
            D=sp.csr_matrix(D,dtype='float32')
        else:
            D=D.astype('float32')
            D.sort_indices()

        if(D.getformat() == 'csc'):
            D=D.tocsr();

        # sum-normalize ('multinomial' handles its own scaling below)
        if (sum_norm == 'cell_median' and norm != 'multinomial'):
            s = D.sum(1).A.flatten()
            sum_norm = np.median(s)
            D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
        elif (sum_norm == 'gene_median' and norm != 'multinomial'):
            s = D.sum(0).A.flatten()
            sum_norm = np.median(s)
            # Avoid division by zero for unexpressed genes.
            s[s==0]=1
            D = D.multiply(1 / s[None,:] * sum_norm).tocsr()

        elif sum_norm is not None and norm != 'multinomial':
            D = D.multiply(1 / D.sum(1).A.flatten()[:,
                           None] * sum_norm).tocsr()

        # normalize
        self.adata.X = D
        if norm is None:
            D.data[:] = (D.data / div)
        elif(norm.lower() == 'log'):
            D.data[:] = np.log2(D.data / div + 1)
        elif(norm.lower() == 'ftt'):
            # Freeman-Tukey: sqrt(x) + sqrt(x + 1).
            D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
        elif norm.lower() == 'multinomial':
            # Pearson residuals under a multinomial sampling model.
            ni = D.sum(1).A.flatten() #cells
            pj = (D.sum(0) / D.sum()).A.flatten() #genes
            col = D.indices
            row=[]
            for i in range(D.shape[0]):
                row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
            row = np.concatenate(row).astype('int32')
            # Expected counts mu_ij = n_i * p_j on the sparsity pattern of D.
            mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
            mu2 = mu.copy()
            mu2.data[:]=mu2.data**2
            mu2 = mu2.multiply(1/ni[:,None])
            mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)

            self.adata.X = mu
            if sum_norm is None:
                sum_norm = np.median(ni)
            # X_disp still gets a log-normalized copy for display purposes.
            D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
            D.data[:] = np.log2(D.data / div + 1)

        else:
            D.data[:] = (D.data / div)

        # zero-out low-expressed genes
        idx = np.where(D.data <= min_expression)[0]
        D.data[idx] = 0

        # filter genes: same include/exclude intersection scheme as cells.
        gene_names = np.array(list(self.adata.var_names))
        idx_genes = np.arange(D.shape[1])
        if(include_genes is not None):
            include_genes = np.array(list(include_genes))
            idx = np.where(np.in1d(gene_names, include_genes))[0]
            idx_genes = np.array(list(set(idx) & set(idx_genes)))

        if(exclude_genes is not None):
            exclude_genes = np.array(list(exclude_genes))
            idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                    invert=True))[0]
            idx_genes = np.array(list(set(idx3) & set(idx_genes)))

        if(filter_genes):
            # c[g] = number of cells in which gene g exceeds min_expression.
            a, ct = np.unique(D.indices, return_counts=True)
            c = np.zeros(D.shape[1])
            c[a] = ct

            keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                           c / D.shape[0] <= 1 - thresh))[0]

            idx_genes = np.array(list(set(keep) & set(idx_genes)))

        mask_genes = np.zeros(D.shape[1], dtype='bool')
        mask_genes[idx_genes] = True

        # Zero out filtered genes rather than dropping columns, so shapes
        # stay aligned with adata_raw.
        self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
        self.adata.X.eliminate_zeros()
        self.adata.var['mask_genes']=mask_genes

        if norm == 'multinomial':
            self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
            self.adata.layers['X_disp'].eliminate_zeros()
        else:
            self.adata.layers['X_disp'] = self.adata.X
    def load_data(self, filename, transpose=True,
                  save_sparse_file='h5ad', sep=',', **kwargs):
        """Loads the specified data file. The file can be a table of
        read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
        as columns by default. The file can also be a pickle file (output from
        'save_sparse_data') or an h5ad file (output from 'save_anndata').

        This function that loads the file specified by 'filename'.

        Parameters
        ----------
        filename - string
            The path to the tabular raw expression counts file.

        sep - string, optional, default ','
            The delimeter used to read the input data table. By default
            assumes the input table is delimited by commas.

        save_sparse_file - str, optional, default 'h5ad'
            If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
            (the native AnnData file format) to the same folder as the original
            data for faster loading in the future. If 'p', pickles the sparse
            data structure, cell names, and gene names in the same folder as
            the original data for faster loading in the future.

        transpose - bool, optional, default True
            By default, assumes file is (genes x cells). Set this to False if
            the file has dimensions (cells x genes).
        """
        if filename.split('.')[-1] == 'p':
            # Pickled (data, cell names, gene names) from save_sparse_data.
            raw_data, all_cell_names, all_gene_names = (
                pickle.load(open(filename, 'rb')))

            if(transpose):
                raw_data = raw_data.T
                if raw_data.getformat()=='csc':
                    print("Converting sparse matrix to csr format...")
                    raw_data=raw_data.tocsr()
            # Already a fast-loading format; do not re-save.
            save_sparse_file = None
        elif filename.split('.')[-1] != 'h5ad':
            # Plain-text table of counts.
            df = pd.read_csv(filename, sep=sep, index_col=0)
            if(transpose):
                dataset = df.T
            else:
                dataset = df

            raw_data = sp.csr_matrix(dataset.values)
            all_cell_names = np.array(list(dataset.index.values))
            all_gene_names = np.array(list(dataset.columns.values))

        if filename.split('.')[-1] != 'h5ad':
            self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                     var={'var_names': all_gene_names})

            # De-duplicate IDs so AnnData indexing stays unambiguous.
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()

            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = raw_data

        else:
            self.adata_raw = anndata.read_h5ad(filename, **kwargs)
            self.adata = self.adata_raw.copy()
            if 'X_disp' not in list(self.adata.layers.keys()):
                self.adata.layers['X_disp'] = self.adata.X
            save_sparse_file = None

        # Optionally cache a fast-loading copy next to the original file.
        if(save_sparse_file == 'p'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_sparse_data(path + new_sparse_file + '_sparse.p')
        elif(save_sparse_file == 'h5ad'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
    def load_annotations(self, aname, sep=','):
        """Loads cell annotations.

        Loads the cell annotations specified by the 'aname' path and stores
        them in both adata_raw.obs['annotations'] (all cells) and
        adata.obs['annotations'] (filtered cells only).

        Parameters
        ----------
        aname - string
            The path to the annotations file. First column should be cell IDs
            and second column should be the desired annotations.
        sep - string, optional, default ','
            Field delimiter of the annotations file.
        """
        ann = pd.read_csv(aname)

        cell_names = np.array(list(self.adata.obs_names))
        all_cell_names = np.array(list(self.adata_raw.obs_names))

        # Retry parsing with/without an index column and header until the
        # row count matches the number of cells in the raw data.
        if(ann.shape[1] > 1):
            ann = pd.read_csv(aname, index_col=0, sep=sep)
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
        else:
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, header=None, sep=sep)

        ann.index = np.array(list(ann.index.astype('<U100')))
        # ann1: annotations aligned to the filtered cells; ann2: all cells.
        ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
        ann2 = np.array(list(ann.values.flatten()))
        self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
        self.adata.obs['annotations'] = pd.Categorical(ann1)
    def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
        """Computes the contribution of the gene IDs in 'genes' to each
        principal component (PC) of the filtered expression data as the mean of
        the absolute value of the corresponding gene loadings. High values
        correspond to PCs that are highly correlated with the features in
        'genes'. These PCs can then be regressed out of the data using
        'regress_genes'.

        Side effects: stores the fitted PCA object in `self.regression_pca`
        and the PC scores in `self.regression_pcs` for later use by
        'regress_genes'.

        Parameters
        ----------
        genes - numpy.array or list
            Genes for which contribution to each PC will be calculated.

        npcs - int, optional, default None
            How many PCs to calculate when computing PCA of the filtered and
            log-transformed expression data. If None, calculate all PCs.

        plot - bool, optional, default False
            If True, plot the scores reflecting how correlated each PC is with
            genes of interest. Otherwise, do not plot anything.

        Returns:
        -------
        x - numpy.array
            Scores reflecting how correlated each PC is with the genes of
            interest (ordered by decreasing eigenvalues). None if 'genes'
            is not provided.
        """
        from sklearn.decomposition import PCA
        if npcs is None:
            npcs = self.adata.X.shape[0]

        pca = PCA(n_components=npcs)
        pc = pca.fit_transform(self.adata.X.toarray())

        self.regression_pca = pca
        self.regression_pcs = pc

        gene_names = np.array(list(self.adata.var_names))
        if(genes is not None):
            idx = np.where(np.in1d(gene_names, genes))[0]
            # Mean absolute loading of the selected genes on each PC.
            sx = pca.components_[:, idx]
            x = np.abs(sx).mean(1)

            if plot:
                plt.figure()
                plt.plot(x)

            return x
        else:
            return
def regress_genes(self, PCs):
"""Regress out the principal components in 'PCs' from the filtered
expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
been previously called.
Parameters
----------
PCs - int, numpy.array, list
The principal components to regress out of the expression data.
"""
ind = [PCs]
ind = np.array(ind).flatten()
try:
y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
self.regression_pca.components_[ind, :] * self.adata.var[
'weights'].values)
except BaseException:
y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
self.regression_pca.components_[ind, :])
self.adata.X = sp.csr_matrix(y)
    def run(self,
            max_iter=10,
            verbose=True,
            projection='umap',
            stopping_condition=5e-3,
            num_norm_avg=50,
            k=20,
            distance='correlation',
            preprocessing='Normalizer',
            proj_kwargs={}):
        """Runs the Self-Assembling Manifold algorithm.

        Parameters
        ----------
        k - int, optional, default 20
            The number of nearest neighbors to identify for each cell.

        distance : string, optional, default 'correlation'
            The distance metric to use when constructing cell distance
            matrices. Can be any of the distance metrics supported by
            sklearn's 'pdist'.

        max_iter - int, optional, default 10
            The maximum number of iterations SAM will run.

        stopping_condition - float, optional, default 5e-3
            The stopping condition threshold for the RMSE between gene weights
            in adjacent iterations.

        verbose - bool, optional, default True
            If True, the iteration number and error between gene weights in
            adjacent iterations will be displayed.

        projection - str, optional, default 'umap'
            If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
            embedding. Otherwise, no embedding will be generated.

        preprocessing - str, optional, default 'Normalizer'
            If 'Normalizer', use sklearn.preprocessing.Normalizer, which
            normalizes expression data prior to PCA such that each cell has
            unit L2 norm. If 'StandardScaler', use
            sklearn.preprocessing.StandardScaler, which normalizes expression
            data prior to PCA such that each gene has zero mean and unit
            variance. Otherwise, do not normalize the expression data. We
            recommend using 'StandardScaler' for large datasets and
            'Normalizer' otherwise.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This prevents
            genes with large spatial dispersions from skewing the distribution
            of weights.

        proj_kwargs - dict, optional, default {}
            A dictionary of keyword arguments to pass to the projection
            functions.
        """
        self.distance = distance
        D = self.adata.X
        # Clamp k to [5, 100] and below the number of cells.
        self.k = k
        if(self.k < 5):
            self.k = 5
        elif(self.k > 100):
            self.k = 100

        if(self.k > D.shape[0] - 1):
            self.k = D.shape[0] - 2

        numcells = D.shape[0]

        # Use fewer genes / PCs for larger datasets.
        n_genes = 8000
        if numcells > 3000 and n_genes > 3000:
            n_genes = 3000
        elif numcells > 2000 and n_genes > 4500:
            n_genes = 4500
        elif numcells > 1000 and n_genes > 6000:
            n_genes = 6000
        elif n_genes > 8000:
            n_genes = 8000

        npcs = None
        if npcs is None and numcells > 3000:
            npcs = 150
        elif npcs is None and numcells > 2000:
            npcs = 250
        elif npcs is None and numcells > 1000:
            npcs = 350
        elif npcs is None:
            npcs = 500

        tinit = time.time()

        # Bootstrap: a random k-NN graph (each cell linked to itself plus
        # k-1 random cells) seeds the first round of gene weights.
        edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        nums = np.arange(edm.shape[1])
        RINDS = np.random.randint(
            0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                           (self.k - 1)))
        RINDS = np.hstack((nums[:, None], RINDS))

        edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                    (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
        edm = edm.tocsr()

        print('RUNNING SAM')

        W = self.dispersion_ranking_NN(
            edm, num_norm_avg=1)

        old = np.zeros(W.size)
        new = W

        i = 0
        err = ((new - old)**2).mean()**0.5

        if max_iter < 5:
            max_iter = 5

        nnas = num_norm_avg
        # Keep the graph and weight history from each iteration.
        self.Ns=[edm]
        self.Ws = [W]
        # Iterate: weights -> weighted PCA -> k-NN graph -> new weights,
        # until the RMSE between successive weight vectors converges.
        while (i < max_iter and err > stopping_condition):

            conv = err
            if(verbose):
                print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))

            i += 1
            old = new

            W, wPCA_data, EDM, = self.calculate_nnm(
                D, W, n_genes, preprocessing, npcs, numcells, nnas)
            new = W
            err = ((new - old)**2).mean()**0.5

            self.Ns.append(EDM)
            self.Ws.append(W)

        # One final pass with the converged weights.
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        self.Ns.append(EDM)

        all_gene_names = np.array(list(self.adata.var_names))
        indices = np.argsort(-W)
        ranked_genes = all_gene_names[indices]

        self.corr_bin_genes(number_of_features=1000)

        self.adata.uns['ranked_genes'] = ranked_genes

        self.adata.obsm['X_pca'] = wPCA_data
        self.adata.uns['neighbors'] = {}
        self.adata.uns['neighbors']['connectivities'] = EDM

        if(projection == 'tsne'):
            print('Computing the t-SNE embedding...')
            self.run_tsne(**proj_kwargs)
        elif(projection == 'umap'):
            print('Computing the UMAP embedding...')
            self.run_umap(**proj_kwargs)
        elif(projection == 'diff_umap'):
            print('Computing the diffusion UMAP embedding...')
            self.run_diff_umap(**proj_kwargs)

        elapsed = time.time() - tinit

        if verbose:
            print('Elapsed time: ' + str(elapsed) + ' seconds')
    def calculate_nnm(
            self,
            D,
            W,
            n_genes,
            preprocessing,
            npcs,
            numcells,
            num_norm_avg):
        """One SAM iteration: weight the expression data by W, run weighted
        PCA, build the k-nearest-neighbor graph, and recompute gene weights.

        Returns
        -------
        W - ndarray
            Updated gene weights.
        g_weighted - ndarray
            Weighted PCA cell coordinates.
        EDM - scipy.sparse.csr_matrix
            Binary nearest-neighbor adjacency matrix.
        """
        if(n_genes is None):
            gkeep = np.arange(W.size)
        else:
            # Keep only the top n_genes weighted genes (sorted by index).
            gkeep = np.sort(np.argsort(-W)[:n_genes])

        if preprocessing == 'Normalizer':
            Ds = D[:, gkeep].toarray()
            Ds = Normalizer().fit_transform(Ds)

        elif preprocessing == 'StandardScaler':
            Ds = D[:, gkeep].toarray()
            Ds = StandardScaler(with_mean=True).fit_transform(Ds)
            # Clip extreme z-scores to limit outlier influence.
            Ds[Ds > 5] = 5
            Ds[Ds < -5] = -5

        else:
            Ds = D[:, gkeep].toarray()

        # Scale each kept gene by its current SAM weight.
        D_sub = Ds * (W[gkeep])

        if numcells > 500:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='auto')
        else:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='full')

        if self.distance == 'euclidean':
            # Unit-normalize cells so euclidean distances behave like cosine.
            g_weighted = Normalizer().fit_transform(g_weighted)

        self.adata.uns['pca_obj'] = pca
        EDM = self.calc_nnm(g_weighted)

        W = self.dispersion_ranking_NN(
            EDM, num_norm_avg=num_norm_avg)

        self.adata.uns['X_processed'] = D_sub

        return W, g_weighted, EDM
def calc_nnm(self,g_weighted):
numcells=g_weighted.shape[0]
if g_weighted.shape[0] > 8000:
nnm, dists = ut.nearest_neighbors(
g_weighted, n_neighbors=self.k, metric=self.distance)
EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
EDM[np.tile(np.arange(nnm.shape[0])[:, None],
(1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
EDM = EDM.tocsr()
else:
dist = ut.compute_distances(g_weighted, self.distance)
nnm = ut.dist_to_nn(dist, self.k)
EDM = sp.csr_matrix(nnm)
return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
"""Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
geneID_groups = self.adata.uns['gene_groups']
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs)
def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
"""A (hacky) method for binning groups of genes correlated along the
SAM manifold.
Parameters
----------
number_of_features - int, optional, default None
The number of genes to bin. Capped at 5000 due to memory
considerations.
input_gene - str, optional, default None
If not None, use this gene as the first seed when growing the
correlation bins.
"""
weights = self.adata.var['spatial_dispersions'].values
all_gene_names = np.array(list(self.adata.var_names))
D_avg = self.adata.layers['X_knn_avg']
idx2 = np.argsort(-weights)[:weights[weights > 0].size]
if(number_of_features is None or number_of_features > idx2.size):
number_of_features = idx2.size
if number_of_features > 1000:
number_of_features = 1000
if(input_gene is not None):
input_gene = np.where(all_gene_names == input_gene)[0]
if(input_gene.size == 0):
print(
"Gene note found in the filtered dataset. Note "
"that genes are case sensitive.")
return
seeds = [np.array([input_gene])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(all_gene_names[seeds[i]])
return geneID_groups[0]
else:
seeds = [np.array([idx2[0]])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(
all_gene_names[seeds[i]])
self.adata.uns['gene_groups'] = geneID_groups
return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
"""Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' is set to 'precomputed' by default,
implying that this function expects a distance matrix by default.
"""
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d
def run_umap(self, X=None, metric=None, **kwargs):
"""Wrapper for umap-learn.
See https://github.com/lmcinnes/umap sklearn for the documentation
and source code.
"""
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d
def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
method='gauss', **kwargs):
"""
Experimental -- running UMAP on the diffusion components
"""
import scanpy.api as sc
sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
metric=self.distance,method=method)
sc.tl.diffmap(self.adata, n_comps=n_comps)
sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
metric='euclidean',method=method)
if 'X_umap' in self.adata.obsm.keys():
self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
sc.tl.umap(self.adata,min_dist=0.1,copy=False)
def knn_avg(self, nnm=None):
if (nnm is None):
nnm = self.adata.uns['neighbors']['connectivities']
D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])
self.adata.layers['X_knn_avg'] = D_avg
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
"""Display a scatter plot.
Displays a scatter plot using the SAM projection or another input
projection with or without annotations.
Parameters
----------
projection - ndarray of floats, optional, default None
An N x 2 matrix, where N is the number of data points. If None,
use an existing SAM projection (default t-SNE). Can take on values
'umap' or 'tsne' to specify either the SAM UMAP embedding or
SAM t-SNE embedding.
c - ndarray or str, optional, default None
Colors for each cell in the scatter plot. Can be a vector of
floats or strings for cell annotations. Can also be a key
for sam.adata.obs (i.e. 'louvain_clusters').
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
cmap - string, optional, default 'rainbow'
The colormap to use for the input color values.
colorbar - bool, optional default True
If True, display a colorbar indicating which values / annotations
correspond to which color in the scatter plot.
Keyword arguments -
All other keyword arguments that can be passed into
matplotlib.pyplot.scatter can be used.
"""
if (not PLOTTING):
print("matplotlib not installed!")
else:
if(isinstance(projection, str)):
try:
dt = self.adata.obsm[projection]
except KeyError:
print('Please create a projection first using run_umap or'
'run_tsne')
elif(projection is None):
try:
dt = self.adata.obsm['X_umap']
except KeyError:
try:
dt = self.adata.obsm['X_tsne']
except KeyError:
print("Please create either a t-SNE or UMAP projection"
"first.")
return
else:
dt = projection
if(axes is None):
plt.figure()
axes = plt.gca()
if(c is None):
plt.scatter(dt[:, 0], dt[:, 1], s=s,
linewidth=linewidth, edgecolor=edgecolor, **kwargs)
else:
if isinstance(c, str):
try:
c = self.adata.obs[c].get_values()
except KeyError:
0 # do nothing
if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
(isinstance(c, np.ndarray) or isinstance(c, list))):
i = ut.convert_annotations(c)
ui, ai = np.unique(i, return_index=True)
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
cbar = plt.colorbar(cax, ax=axes, ticks=ui)
cbar.ax.set_yticklabels(c[ai])
else:
if not (isinstance(c, np.ndarray) or isinstance(c, list)):
colorbar = False
i = c
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name)
    def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
        """Cluster with DBSCAN, then assign noise points to a cluster.
        Parameters
        ----------
        X - ndarray, optional, default None
            Data to cluster. If None, uses the stored UMAP embedding and
            saves labels to adata.obs['density_clusters']; otherwise the
            labels are returned.
        eps - float, optional, default 1
            DBSCAN neighborhood radius.
        metric - str, optional, default 'euclidean'
            Distance metric passed to DBSCAN. Extra keyword arguments are
            forwarded to DBSCAN as well.
        """
        from sklearn.cluster import DBSCAN
        if X is None:
            X = self.adata.obsm['X_umap']
            save = True
        else:
            save = False
        cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)
        # DBSCAN labels noise points -1; reassign each noise point to the
        # cluster that dominates among its k nearest clustered points.
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            # Pairwise distances between clustered (idx0) and noise (idx1).
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
            # One-hot neighbor membership matrix (noise x clustered).
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            # Count, per noise point, how many neighbors fall in each cluster.
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
            cl[idx1] = np.argmax(nnmc, axis=1)
        if save:
            self.adata.obs['density_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def louvain_clustering(self, X=None, res=1, method='modularity'):
        """Runs Louvain clustering using the vtraag implementation. Assumes
        that 'louvain' optional dependency is installed.
        Parameters
        ----------
        X - scipy.sparse matrix, optional, default None
            Adjacency/connectivity matrix to cluster. If None, the stored
            neighbor connectivities are used and the labels are saved to
            adata.obs['louvain_clusters']; otherwise labels are returned.
        res - float, optional, default 1
            The resolution parameter which tunes the number of clusters Louvain
            finds.
        method - str, optional, default 'modularity'
            Can be 'modularity' or 'significance', which are two different
            optimizing functions in the Louvain algorithm.
        """
        if X is None:
            X = self.adata.uns['neighbors']['connectivities']
            save = True
        else:
            if not sp.isspmatrix_csr(X):
                X = sp.csr_matrix(X)
            save = False
        import igraph as ig
        import louvain
        # Densify connectivity via X.X^T, then keep only the k strongest
        # links per cell before building the igraph graph.
        adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
        sources, targets = adjacency.nonzero()
        weights = adjacency[sources, targets]
        if isinstance(weights, np.matrix):
            weights = weights.A1
        g = ig.Graph(directed=True)
        g.add_vertices(adjacency.shape[0])
        g.add_edges(list(zip(sources, targets)))
        try:
            g.es['weight'] = weights
        except BaseException:
            # Best-effort: fall back to an unweighted graph if edge weights
            # cannot be assigned.
            pass
        if method == 'significance':
            cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
        else:
            cl = louvain.find_partition(
                g,
                louvain.RBConfigurationVertexPartition,
                resolution_parameter=res)
        if save:
            self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
        else:
            return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
"""Performs k-means clustering.
Parameters
----------
numc - int
Number of clusters
npcs - int, optional, default 15
Number of principal components to use as inpute for k-means
clustering.
"""
from sklearn.cluster import KMeans
if X is None:
D_sub = self.adata.uns['X_processed']
X = (
D_sub -
D_sub.mean(0)).dot(
self.adata.uns['pca_obj'].components_[
:npcs,
:].T)
save = True
else:
save = False
cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
if save:
self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
else:
return cl
def leiden_clustering(self, X=None, res = 1):
import scanpy.api as sc
if X is None:
sc.tl.leiden(self.adata, resolution = res,
key_added='leiden_clusters')
self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
'leiden_clusters'].get_values().astype('int'))
else:
sc.tl.leiden(self.adata, resolution = res, adjacency = X,
key_added='leiden_clusters_X')
self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
'leiden_clusters_X'].get_values().astype('int'))
    def hdbknn_clustering(self, X=None, k=None, **kwargs):
        """Cluster with HDBSCAN, then assign noise points to a cluster.
        Parameters
        ----------
        X - ndarray, optional, default None
            Data to cluster. If None, clusters the top-15 PC projection of
            the processed data and saves labels to
            adata.obs['hdbknn_clusters']; otherwise labels are returned.
        k - int, optional, default None
            Number of nearest clustered points used to vote a noise point
            into a cluster. Defaults to 20.
        Keyword arguments are forwarded to hdbscan.HDBSCAN.
        """
        import hdbscan
        if X is None:
            #X = self.adata.obsm['X_pca']
            D = self.adata.uns['X_processed']
            # Project onto the first 15 principal components, then L2-
            # normalize each cell.
            X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
            X = Normalizer().fit_transform(X)
            save = True
        else:
            save = False
        if k is None:
            k = 20#self.k
        hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
        cl = hdb.fit_predict(X)
        # HDBSCAN labels noise points -1; reassign each noise point to the
        # cluster that dominates among its k nearest clustered points
        # (same scheme as density_clustering).
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :k]
            # One-hot neighbor membership matrix (noise x clustered).
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
            cl[idx1] = np.argmax(nnmc, axis=1)
        if save:
            self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
        else:
            return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
"""
Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers
    def identify_marker_genes_ratio(self, labels=None):
        """
        Ranks marker genes for each cluster using a SAM-weighted
        expression-ratio approach (works quite well).
        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.
        Returns
        -------
        markers - dict
            Maps each cluster ID to all gene names sorted by decreasing
            marker score. Also stored in adata.uns['marker_genes_ratio'].
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels
        all_gene_names = np.array(list(self.adata.var_names))
        markers={}
        # s: per-gene total expression over all cells.
        s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
        lblsu=np.unique(lbls)
        for i in lblsu:
            # d: per-gene total expression within cluster i.
            d = np.array(self.adata.layers['X_disp']
                         [lbls == i, :].sum(0)).flatten()
            # Score: (in-cluster sum)^2 / total sum, scaled by SAM weight.
            rat = np.zeros(d.size)
            rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
                self.adata.var['weights'].values[s > 0]
            x = np.argsort(-rat)
            markers[i] = all_gene_names[x[:]]
        self.adata.uns['marker_genes_ratio'] = markers
        return markers
    def identify_marker_genes_corr(self, labels=None, n_genes=4000):
        """
        Ranking marker genes based on their respective magnitudes in the
        correlation dot products with cluster-specific reference expression
        profiles.
        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.
        n_genes - int, optional, default 4000
            By default, computes correlations on the top 4000 SAM-weighted genes.
        Returns
        -------
        markers - dict
            Maps each cluster ID to the top-`n_genes` gene names sorted by
            decreasing score. Also stored in adata.uns['marker_genes_corr'].
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels
        w=self.adata.var['weights'].values
        s = StandardScaler()
        # Standardize the top-weighted genes, then re-scale by SAM weights.
        idxg = np.argsort(-w)[:n_genes]
        y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
        all_gene_names = np.array(list(self.adata.var_names))[idxg]
        markers = {}
        lblsu=np.unique(lbls)
        for i in lblsu:
            Gcells = np.array(list(self.adata.obs_names[lbls==i]))
            z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
            # Correlate each cell's (z-scored) profile with the cluster's
            # mean reference profile; average over cells of the cluster.
            m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
            ref = z1.mean(0)
            ref = (ref-ref.mean())/ref.std()
            g2 = (m1*ref).mean(0)
            markers[i] = all_gene_names[np.argsort(-g2)]
        self.adata.uns['marker_genes_corr'] = markers
        return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.calculate_regression_PCs | python | def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
from sklearn.decomposition import PCA
if npcs is None:
npcs = self.adata.X.shape[0]
pca = PCA(n_components=npcs)
pc = pca.fit_transform(self.adata.X.toarray())
self.regression_pca = pca
self.regression_pcs = pc
gene_names = np.array(list(self.adata.var_names))
if(genes is not None):
idx = np.where(np.in1d(gene_names, genes))[0]
sx = pca.components_[:, idx]
x = np.abs(sx).mean(1)
if plot:
plt.figure()
plt.plot(x)
return x
else:
return | Computes the contribution of the gene IDs in 'genes' to each
principal component (PC) of the filtered expression data as the mean of
the absolute value of the corresponding gene loadings. High values
correspond to PCs that are highly correlated with the features in
'genes'. These PCs can then be regressed out of the data using
'regress_genes'.
Parameters
----------
genes - numpy.array or list
Genes for which contribution to each PC will be calculated.
npcs - int, optional, default None
How many PCs to calculate when computing PCA of the filtered and
log-transformed expression data. If None, calculate all PCs.
plot - bool, optional, default False
If True, plot the scores reflecting how correlated each PC is with
genes of interest. Otherwise, do not plot anything.
Returns:
-------
x - numpy.array
Scores reflecting how correlated each PC is with the genes of
interest (ordered by decreasing eigenvalues). | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L534-L586 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
    def __init__(self, counts=None, annotations=None):
        """Build the raw and working AnnData objects from `counts`.
        See the class docstring for the accepted `counts` formats. If
        `counts` is None, no data is loaded (use one of the load methods).
        """
        # Accept (data, gene IDs, cell IDs) tuples/lists, DataFrames, or
        # pre-built AnnData objects; everything is funneled into adata_raw.
        if isinstance(counts, tuple) or isinstance(counts, list):
            raw_data, all_gene_names, all_cell_names = counts
            if isinstance(raw_data, np.ndarray):
                raw_data = sp.csr_matrix(raw_data)
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        elif isinstance(counts, pd.DataFrame):
            raw_data = sp.csr_matrix(counts.values)
            all_gene_names = np.array(list(counts.columns.values))
            all_cell_names = np.array(list(counts.index.values))
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        elif isinstance(counts, AnnData):
            all_cell_names=np.array(list(counts.obs_names))
            all_gene_names=np.array(list(counts.var_names))
            self.adata_raw = counts
        elif counts is not None:
            raise Exception(
                "\'counts\' must be either a tuple/list of "
                "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
                "cells x genes")
        if(annotations is not None):
            annotations = np.array(list(annotations))
            if counts is not None:
                self.adata_raw.obs['annotations'] = pd.Categorical(annotations)
        if(counts is not None):
            # De-duplicate IDs only when duplicates actually exist.
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()
            # The working copy; 'X_disp' holds data used for visualization.
            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = self.adata.X
    def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                        include_genes=None, exclude_genes=None,
                        include_cells=None, exclude_cells=None,
                        norm='log', min_expression=1, thresh=0.01,
                        filter_genes=True):
        """Log-normalizes and filters the expression data.
        Parameters
        ----------
        div : float, optional, default 1
            The factor by which the gene expression will be divided prior to
            log normalization.
        downsample : float, optional, default 0
            The factor by which to randomly downsample the data. If 0, the
            data will not be downsampled.
        sum_norm : str or float, optional, default None
            If a float, the total number of transcripts in each cell will be
            normalized to this value prior to normalization and filtering.
            Otherwise, nothing happens. If 'cell_median', each cell is
            normalized to have the median total read count per cell. If
            'gene_median', each gene is normalized to have the median total
            read count per gene.
        norm : str, optional, default 'log'
            If 'log', log-normalizes the expression data. If 'ftt', applies the
            Freeman-Tukey variance-stabilization transformation. If
            'multinomial', applies the Pearson-residual transformation (this is
            experimental and should only be used for raw, un-normalized UMI
            datasets). If None, the data is not normalized.
        include_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to keep.
            All other genes will be filtered out. Gene names are case-
            sensitive.
        exclude_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to
            exclude. These genes will be filtered out. Gene names are case-
            sensitive.
        include_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to keep.
            All other cells will be filtered out. Cell names are
            case-sensitive.
        exclude_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to exclude.
            Thses cells will be filtered out. Cell names are
            case-sensitive.
        min_expression : float, optional, default 1
            The threshold above which a gene is considered
            expressed. Gene expression values less than 'min_expression' are
            set to zero.
        thresh : float, optional, default 0.01
            Keep genes expressed in greater than 'thresh'*100 % of cells and
            less than (1-'thresh')*100 % of cells, where a gene is considered
            expressed if its expression value exceeds 'min_expression'.
            (NOTE: doc previously said 0.2; the signature default is 0.01.)
        filter_genes : bool, optional, default True
            Setting this to False turns off filtering operations aside from
            removing genes with zero expression across all cells. Genes passed
            in exclude_genes or not passed in include_genes will still be
            filtered.
        """
        # load data
        try:
            D= self.adata_raw.X
            self.adata = self.adata_raw.copy()
        except AttributeError:
            print('No data loaded')
        # filter cells: intersect the include/exclude/downsample selections.
        cell_names = np.array(list(self.adata_raw.obs_names))
        idx_cells = np.arange(D.shape[0])
        if(include_cells is not None):
            include_cells = np.array(list(include_cells))
            idx2 = np.where(np.in1d(cell_names, include_cells))[0]
            idx_cells = np.array(list(set(idx2) & set(idx_cells)))
        if(exclude_cells is not None):
            exclude_cells = np.array(list(exclude_cells))
            idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                    invert=True))[0]
            idx_cells = np.array(list(set(idx4) & set(idx_cells)))
        if downsample > 0:
            numcells = int(D.shape[0] / downsample)
            rand_ind = np.random.choice(np.arange(D.shape[0]),
                                        size=numcells, replace=False)
            idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
        else:
            numcells = D.shape[0]
        mask_cells = np.zeros(D.shape[0], dtype='bool')
        mask_cells[idx_cells] = True
        self.adata = self.adata_raw[mask_cells,:].copy()
        D = self.adata.X
        # Ensure float32 CSR storage for the arithmetic below.
        if isinstance(D,np.ndarray):
            D=sp.csr_matrix(D,dtype='float32')
        else:
            D=D.astype('float32')
            D.sort_indices()
        if(D.getformat() == 'csc'):
            D=D.tocsr();
        # sum-normalize (skipped for 'multinomial', which normalizes later)
        if (sum_norm == 'cell_median' and norm != 'multinomial'):
            s = D.sum(1).A.flatten()
            sum_norm = np.median(s)
            D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
        elif (sum_norm == 'gene_median' and norm != 'multinomial'):
            s = D.sum(0).A.flatten()
            sum_norm = np.median(s)
            s[s==0]=1
            D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
        elif sum_norm is not None and norm != 'multinomial':
            D = D.multiply(1 / D.sum(1).A.flatten()[:,
                           None] * sum_norm).tocsr()
        # normalize
        self.adata.X = D
        if norm is None:
            D.data[:] = (D.data / div)
        elif(norm.lower() == 'log'):
            D.data[:] = np.log2(D.data / div + 1)
        elif(norm.lower() == 'ftt'):
            # Freeman-Tukey: sqrt(x) + sqrt(x+1)
            D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
        elif norm.lower() == 'multinomial':
            # Pearson residuals: (x - mu) / sqrt(mu - mu^2/n), where
            # mu = n_i * p_j under a multinomial model of UMI counts.
            ni = D.sum(1).A.flatten()  # total counts per cell
            pj = (D.sum(0) / D.sum()).A.flatten()  # gene frequencies
            col = D.indices
            row=[]
            for i in range(D.shape[0]):
                row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
            row = np.concatenate(row).astype('int32')
            mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
            mu2 = mu.copy()
            mu2.data[:]=mu2.data**2
            mu2 = mu2.multiply(1/ni[:,None])
            mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
            self.adata.X = mu
            # D (used for X_disp below) still gets a log transform.
            if sum_norm is None:
                sum_norm = np.median(ni)
            D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
            D.data[:] = np.log2(D.data / div + 1)
        else:
            D.data[:] = (D.data / div)
        # zero-out low-expressed genes
        idx = np.where(D.data <= min_expression)[0]
        D.data[idx] = 0
        # filter genes: intersect include/exclude/expression-rate selections.
        gene_names = np.array(list(self.adata.var_names))
        idx_genes = np.arange(D.shape[1])
        if(include_genes is not None):
            include_genes = np.array(list(include_genes))
            idx = np.where(np.in1d(gene_names, include_genes))[0]
            idx_genes = np.array(list(set(idx) & set(idx_genes)))
        if(exclude_genes is not None):
            exclude_genes = np.array(list(exclude_genes))
            idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                    invert=True))[0]
            idx_genes = np.array(list(set(idx3) & set(idx_genes)))
        if(filter_genes):
            # Fraction of cells expressing each gene must lie in
            # (thresh, 1 - thresh].
            a, ct = np.unique(D.indices, return_counts=True)
            c = np.zeros(D.shape[1])
            c[a] = ct
            keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                           c / D.shape[0] <= 1 - thresh))[0]
            idx_genes = np.array(list(set(keep) & set(idx_genes)))
        # Filtered genes are zeroed out (shape is preserved), so column
        # indices stay aligned with adata_raw.
        mask_genes = np.zeros(D.shape[1], dtype='bool')
        mask_genes[idx_genes] = True
        self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
        self.adata.X.eliminate_zeros()
        self.adata.var['mask_genes']=mask_genes
        if norm == 'multinomial':
            self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
            self.adata.layers['X_disp'].eliminate_zeros()
        else:
            self.adata.layers['X_disp'] = self.adata.X
    def load_data(self, filename, transpose=True,
                  save_sparse_file='h5ad', sep=',', **kwargs):
        """Loads the specified data file. The file can be a table of
        read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
        as columns by default. The file can also be a pickle file (output from
        'save_sparse_data') or an h5ad file (output from 'save_anndata').
        This function that loads the file specified by 'filename'.
        Parameters
        ----------
        filename - string
            The path to the tabular raw expression counts file.
        sep - string, optional, default ','
            The delimeter used to read the input data table. By default
            assumes the input table is delimited by commas.
        save_sparse_file - str, optional, default 'h5ad'
            If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
            (the native AnnData file format) to the same folder as the original
            data for faster loading in the future. If 'p', pickles the sparse
            data structure, cell names, and gene names in the same folder as
            the original data for faster loading in the future.
        transpose - bool, optional, default True
            By default, assumes file is (genes x cells). Set this to False if
            the file has dimensions (cells x genes).
        """
        # Dispatch on file extension: '.p' pickle, '.h5ad', or a text table.
        if filename.split('.')[-1] == 'p':
            raw_data, all_cell_names, all_gene_names = (
                pickle.load(open(filename, 'rb')))
            if(transpose):
                raw_data = raw_data.T
            if raw_data.getformat()=='csc':
                print("Converting sparse matrix to csr format...")
                raw_data=raw_data.tocsr()
            # Already a fast-loading format; don't re-save.
            save_sparse_file = None
        elif filename.split('.')[-1] != 'h5ad':
            df = pd.read_csv(filename, sep=sep, index_col=0)
            if(transpose):
                dataset = df.T
            else:
                dataset = df
            raw_data = sp.csr_matrix(dataset.values)
            all_cell_names = np.array(list(dataset.index.values))
            all_gene_names = np.array(list(dataset.columns.values))
        if filename.split('.')[-1] != 'h5ad':
            # Build adata_raw/adata from the parsed matrix and names.
            self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                     var={'var_names': all_gene_names})
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()
            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = raw_data
        else:
            self.adata_raw = anndata.read_h5ad(filename, **kwargs)
            self.adata = self.adata_raw.copy()
            if 'X_disp' not in list(self.adata.layers.keys()):
                self.adata.layers['X_disp'] = self.adata.X
            save_sparse_file = None
        # Optionally cache a fast-loading copy next to the original file.
        if(save_sparse_file == 'p'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_sparse_data(path + new_sparse_file + '_sparse.p')
        elif(save_sparse_file == 'h5ad'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
    def load_annotations(self, aname, sep=','):
        """Loads cell annotations.
        Loads the cell annoations specified by the 'aname' path.
        Parameters
        ----------
        aname - string
            The path to the annotations file. First column should be cell IDs
            and second column should be the desired annotations.
        sep - string, optional, default ','
            Delimiter used when re-reading the file.
        """
        ann = pd.read_csv(aname)
        cell_names = np.array(list(self.adata.obs_names))
        all_cell_names = np.array(list(self.adata_raw.obs_names))
        # Re-parse with/without an index column and header until the number
        # of rows matches the number of cells in the raw data.
        if(ann.shape[1] > 1):
            ann = pd.read_csv(aname, index_col=0, sep=sep)
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
        else:
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, header=None, sep=sep)
        # Coerce the index to fixed-width strings so lookups by cell name
        # match the stored obs_names.
        ann.index = np.array(list(ann.index.astype('<U100')))
        # ann1: annotations restricted/aligned to the filtered cells;
        # ann2: annotations for all raw cells.
        ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
        ann2 = np.array(list(ann.values.flatten()))
        self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
        self.adata.obs['annotations'] = pd.Categorical(ann1)
    def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
        """Computes the spatial dispersion factors for each gene.
        Parameters
        ----------
        nnm - scipy.sparse, float
            Square cell-to-cell nearest-neighbor matrix.
        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This ensures
            that outlier genes do not significantly skew the weight
            distribution.
        Returns:
        -------
        weights - ndarray, float
            The vector of gene weights (also stored in
            adata.var['weights']).
        """
        self.knn_avg(nnm)
        D_avg = self.adata.layers['X_knn_avg']
        # Fano-factor-like dispersion of the neighbor-averaged expression.
        mu, var = sf.mean_variance_axis(D_avg, axis=0)
        dispersions = np.zeros(var.size)
        dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
        self.adata.var['spatial_dispersions'] = dispersions.copy()
        # Clip at the mean of the top `num_norm_avg` dispersions so a few
        # outlier genes do not dominate the normalization.
        ma = np.sort(dispersions)[-num_norm_avg:].mean()
        dispersions[dispersions >= ma] = ma
        weights = ((dispersions / dispersions.max())**0.5).flatten()
        self.adata.var['weights'] = weights
        return weights
    def regress_genes(self, PCs):
        """Regress out the principal components in 'PCs' from the filtered
        expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
        been previously called.
        Parameters
        ----------
        PCs - int, numpy.array, list
            The principal components to regress out of the expression data.
        """
        ind = [PCs]
        ind = np.array(ind).flatten()
        try:
            # Weighted regression using the stored SAM gene weights.
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :] * self.adata.var[
                    'weights'].values)
        except BaseException:
            # Fallback: unweighted regression (e.g. when weights are not
            # available or shapes mismatch). NOTE(review): the broad
            # BaseException catch is deliberate best-effort behavior here.
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :])
        self.adata.X = sp.csr_matrix(y)
def run(self,
        max_iter=10,
        verbose=True,
        projection='umap',
        stopping_condition=5e-3,
        num_norm_avg=50,
        k=20,
        distance='correlation',
        preprocessing='Normalizer',
        proj_kwargs={}):
    """Runs the Self-Assembling Manifold algorithm.

    Parameters
    ----------
    k - int, optional, default 20
        The number of nearest neighbors to identify for each cell.
    distance : string, optional, default 'correlation'
        The distance metric to use when constructing cell distance
        matrices. Can be any of the distance metrics supported by
        sklearn's 'pdist'.
    max_iter - int, optional, default 10
        The maximum number of iterations SAM will run.
    stopping_condition - float, optional, default 5e-3
        The stopping condition threshold for the RMSE between gene weights
        in adjacent iterations.
    verbose - bool, optional, default True
        If True, the iteration number and error between gene weights in
        adjacent iterations will be displayed.
    projection - str, optional, default 'umap'
        If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
        embedding. Otherwise, no embedding will be generated.
    preprocessing - str, optional, default 'Normalizer'
        If 'Normalizer', use sklearn.preprocessing.Normalizer, which
        normalizes expression data prior to PCA such that each cell has
        unit L2 norm. If 'StandardScaler', use
        sklearn.preprocessing.StandardScaler, which normalizes expression
        data prior to PCA such that each gene has zero mean and unit
        variance. Otherwise, do not normalize the expression data. We
        recommend using 'StandardScaler' for large datasets and
        'Normalizer' otherwise.
    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This prevents
        genes with large spatial dispersions from skewing the distribution
        of weights.
    proj_kwargs - dict, optional, default {}
        A dictionary of keyword arguments to pass to the projection
        functions.
    """
    self.distance = distance
    D = self.adata.X
    self.k = k
    # Clamp k to [5, 100] and to at most (number of cells - 2).
    if(self.k < 5):
        self.k = 5
    elif(self.k > 100):
        self.k = 100
    if(self.k > D.shape[0] - 1):
        self.k = D.shape[0] - 2
    numcells = D.shape[0]
    # Cap the number of top-weighted genes used for PCA by dataset size
    # (more cells -> fewer genes for tractability).
    n_genes = 8000
    if numcells > 3000 and n_genes > 3000:
        n_genes = 3000
    elif numcells > 2000 and n_genes > 4500:
        n_genes = 4500
    elif numcells > 1000 and n_genes > 6000:
        n_genes = 6000
    elif n_genes > 8000:
        n_genes = 8000
    # Likewise choose the number of principal components by dataset size.
    npcs = None
    if npcs is None and numcells > 3000:
        npcs = 150
    elif npcs is None and numcells > 2000:
        npcs = 250
    elif npcs is None and numcells > 1000:
        npcs = 350
    elif npcs is None:
        npcs = 500
    tinit = time.time()
    # Seed the algorithm with a random kNN graph: each cell is connected
    # to itself plus (k - 1) random cells.
    edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
    nums = np.arange(edm.shape[1])
    RINDS = np.random.randint(
        0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                       (self.k - 1)))
    RINDS = np.hstack((nums[:, None], RINDS))
    edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
    edm = edm.tocsr()
    print('RUNNING SAM')
    # Initial gene weights from the random graph (num_norm_avg=1 here,
    # i.e. no clipping on the first pass).
    W = self.dispersion_ranking_NN(
        edm, num_norm_avg=1)
    old = np.zeros(W.size)
    new = W
    i = 0
    # RMSE between consecutive weight vectors drives convergence.
    err = ((new - old)**2).mean()**0.5
    if max_iter < 5:
        max_iter = 5
    nnas = num_norm_avg
    # Keep the per-iteration graphs and weights for inspection.
    self.Ns = [edm]
    self.Ws = [W]
    # Iterate: weights -> weighted PCA -> kNN graph -> new weights.
    while (i < max_iter and err > stopping_condition):
        conv = err
        if(verbose):
            print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))
        i += 1
        old = new
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        new = W
        err = ((new - old)**2).mean()**0.5
        self.Ns.append(EDM)
        self.Ws.append(W)
    # One final pass with the converged weights.
    W, wPCA_data, EDM, = self.calculate_nnm(
        D, W, n_genes, preprocessing, npcs, numcells, nnas)
    self.Ns.append(EDM)
    all_gene_names = np.array(list(self.adata.var_names))
    indices = np.argsort(-W)
    ranked_genes = all_gene_names[indices]
    # Populate adata.uns['gene_groups'] as a side effect.
    self.corr_bin_genes(number_of_features=1000)
    self.adata.uns['ranked_genes'] = ranked_genes
    self.adata.obsm['X_pca'] = wPCA_data
    self.adata.uns['neighbors'] = {}
    self.adata.uns['neighbors']['connectivities'] = EDM
    # Optional 2D embedding of the final manifold.
    if(projection == 'tsne'):
        print('Computing the t-SNE embedding...')
        self.run_tsne(**proj_kwargs)
    elif(projection == 'umap'):
        print('Computing the UMAP embedding...')
        self.run_umap(**proj_kwargs)
    elif(projection == 'diff_umap'):
        print('Computing the diffusion UMAP embedding...')
        self.run_diff_umap(**proj_kwargs)
    elapsed = time.time() - tinit
    if verbose:
        print('Elapsed time: ' + str(elapsed) + ' seconds')
def calculate_nnm(
        self,
        D,
        W,
        n_genes,
        preprocessing,
        npcs,
        numcells,
        num_norm_avg):
    """Perform one SAM update step: weighted PCA of the top-weighted
    genes, kNN graph construction, and recomputation of the gene
    weights from the new graph.

    Returns (W, g_weighted, EDM): the updated weight vector, the
    weighted PCA coordinates, and the new sparse kNN adjacency matrix.
    """
    # Select the top n_genes by weight (all genes if n_genes is None).
    if(n_genes is None):
        gkeep = np.arange(W.size)
    else:
        gkeep = np.sort(np.argsort(-W)[:n_genes])
    # Densify the selected columns and apply the chosen normalization.
    if preprocessing == 'Normalizer':
        Ds = D[:, gkeep].toarray()
        Ds = Normalizer().fit_transform(Ds)
    elif preprocessing == 'StandardScaler':
        Ds = D[:, gkeep].toarray()
        Ds = StandardScaler(with_mean=True).fit_transform(Ds)
        # Clip extreme z-scores to +/-5.
        Ds[Ds > 5] = 5
        Ds[Ds < -5] = -5
    else:
        Ds = D[:, gkeep].toarray()
    # Scale each gene by its SAM weight before PCA.
    D_sub = Ds * (W[gkeep])
    if numcells > 500:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='auto')
    else:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='full')
    # For euclidean distance, L2-normalize the PCA coordinates.
    if self.distance == 'euclidean':
        g_weighted = Normalizer().fit_transform(g_weighted)
    self.adata.uns['pca_obj'] = pca
    EDM = self.calc_nnm(g_weighted)
    # New weights from the dispersion of kNN-averaged expression.
    W = self.dispersion_ranking_NN(
        EDM, num_norm_avg=num_norm_avg)
    self.adata.uns['X_processed'] = D_sub
    return W, g_weighted, EDM
def calc_nnm(self, g_weighted):
    """Build the binary k-nearest-neighbor adjacency matrix (csr) for
    the projected cells in ``g_weighted``.

    Small datasets use exact dense pairwise distances; large ones
    (> 8000 cells) use an approximate nearest-neighbor search.
    """
    n_cells = g_weighted.shape[0]
    if n_cells <= 8000:
        # Exact route: dense pairwise distances -> kNN mask.
        pairwise = ut.compute_distances(g_weighted, self.distance)
        adjacency = sp.csr_matrix(ut.dist_to_nn(pairwise, self.k))
    else:
        # Approximate route for large datasets.
        knn_indices, _ = ut.nearest_neighbors(
            g_weighted, n_neighbors=self.k, metric=self.distance)
        adjacency = sp.coo_matrix((n_cells, n_cells), dtype='i').tolil()
        rows = np.repeat(np.arange(knn_indices.shape[0]),
                         knn_indices.shape[1])
        adjacency[rows, knn_indices.flatten()] = 1
        adjacency = adjacency.tocsr()
    return adjacency
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
    """Plot orthogonal expression patterns.

    With no arguments, displays the top-ranked gene of every distinct
    correlated gene group. If 'group' is given, displays the top
    'n_genes' genes within that particular group instead.

    Parameters
    ----------
    group - int, optional, default None
        Index of the correlated group to display. If None, show the
        top-ranked gene of each group.
    n_genes - int, optional, default 5
        Number of top-ranked genes shown when 'group' is specified.
    **kwargs -
        Forwarded to 'show_gene_expression' / 'scatter'.
    """
    groups = self.adata.uns['gene_groups']
    if group is None:
        # One representative (top-ranked) gene per group.
        for members in groups:
            self.show_gene_expression(members[0], **kwargs)
    else:
        # Top n_genes genes within the requested group.
        for idx in range(n_genes):
            self.show_gene_expression(groups[group][idx], **kwargs)
def plot_correlated_genes(
        self,
        name,
        n_genes=5,
        number_of_features=1000,
        **kwargs):
    """Plot expression patterns of genes correlated with the input gene.

    Parameters
    ----------
    name - string
        The gene with respect to which correlated expression patterns
        will be displayed.
    n_genes - int, optional, default 5
        The number of top-ranked correlated genes to display.
    **kwargs -
        Forwarded to 'show_gene_expression' / 'scatter'.

    Returns the ranked correlated gene names (excluding the query gene
    itself), or None if the gene is not in the filtered data.
    """
    gene_names = np.array(list(self.adata.var_names))
    if not np.any(gene_names == name):
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return
    correlated = self.corr_bin_genes(
        input_gene=name,
        number_of_features=number_of_features)
    # Index 0 is the query gene itself; show the following n_genes.
    upper = min(n_genes + 1, correlated.size)
    for idx in range(1, upper):
        self.show_gene_expression(correlated[idx], **kwargs)
    return correlated[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
    """A (hacky) method for binning groups of genes correlated along the
    SAM manifold.

    Parameters
    ----------
    number_of_features - int, optional, default None
        The number of genes to bin. Capped at 5000 due to memory
        considerations.
    input_gene - str, optional, default None
        If not None, use this gene as the first seed when growing the
        correlation bins.
    """
    # NOTE(review): the two branches below are near-duplicates of the
    # same greedy seed-growing loop and could be factored into a shared
    # helper; left as-is to preserve behavior exactly.
    weights = self.adata.var['spatial_dispersions'].values
    all_gene_names = np.array(list(self.adata.var_names))
    D_avg = self.adata.layers['X_knn_avg']
    # Gene indices with positive dispersion, sorted by decreasing weight.
    idx2 = np.argsort(-weights)[:weights[weights > 0].size]
    if(number_of_features is None or number_of_features > idx2.size):
        number_of_features = idx2.size
    # Hard cap (the docstring says 5000; the code enforces 1000 —
    # TODO confirm which is intended).
    if number_of_features > 1000:
        number_of_features = 1000
    if(input_gene is not None):
        # Seed the first bin with the requested gene.
        input_gene = np.where(all_gene_names == input_gene)[0]
        if(input_gene.size == 0):
            # NOTE(review): "note found" is a typo for "not found".
            print(
                "Gene note found in the filtered dataset. Note "
                "that genes are case sensitive.")
            return
        seeds = [np.array([input_gene])]
        # Pairwise correlation of the kNN-averaged expression of the top
        # number_of_features genes.
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            # Baseline: mean of this gene's positive correlations.
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            # Attach gene i to the seed whose representative correlates
            # with it most strongly above the baseline.
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                        > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                # No sufficiently correlated seed: start a new bin.
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(all_gene_names[seeds[i]])
        # Return only the group seeded by the input gene.
        return geneID_groups[0]
    else:
        # Seed with the highest-weighted gene and grow bins greedily.
        seeds = [np.array([idx2[0]])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                        > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(
                all_gene_names[seeds[i]])
        # Persist all groups for plot_correlated_groups.
        self.adata.uns['gene_groups'] = geneID_groups
        return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
    """Wrapper for sklearn's t-SNE implementation.

    If ``X`` is given, fits t-SNE on it with the supplied ``metric``
    and returns the embedding. Otherwise fits t-SNE on the SAM PCA
    coordinates using ``self.distance`` and stores the 2D result in
    ``adata.obsm['X_tsne']``. See sklearn for the full t-SNE
    documentation; all keyword arguments are forwarded.
    """
    if X is not None:
        return man.TSNE(metric=metric, **kwargs).fit_transform(X)
    embedding = man.TSNE(
        metric=self.distance,
        **kwargs).fit_transform(self.adata.obsm['X_pca'])
    self.adata.obsm['X_tsne'] = embedding
def run_umap(self, X=None, metric=None, **kwargs):
    """Wrapper for umap-learn.

    If ``X`` is given, fits UMAP on it and returns the embedding;
    otherwise fits UMAP on the SAM PCA coordinates and stores the 2D
    result in ``adata.obsm['X_umap']``. ``metric`` defaults to
    ``self.distance``. See https://github.com/lmcinnes/umap for the
    documentation and source code.
    """
    import umap as umap
    if metric is None:
        metric = self.distance
    reducer = umap.UMAP(metric=metric, **kwargs)
    if X is not None:
        return reducer.fit_transform(X)
    self.adata.obsm['X_umap'] = reducer.fit_transform(
        self.adata.obsm['X_pca'])
def run_diff_umap(self, use_rep='X_pca', metric='euclidean', n_comps=15,
                  method='gauss', **kwargs):
    """
    Experimental -- running UMAP on the diffusion components
    """
    import scanpy.api as sc
    # Build a neighbor graph on the chosen representation, compute the
    # diffusion map, then rebuild neighbors in diffusion space.
    sc.pp.neighbors(self.adata, use_rep=use_rep, n_neighbors=self.k,
                    metric=self.distance, method=method)
    sc.tl.diffmap(self.adata, n_comps=n_comps)
    sc.pp.neighbors(self.adata, use_rep='X_diffmap', n_neighbors=self.k,
                    metric='euclidean', method=method)
    # Preserve any existing SAM UMAP embedding before scanpy overwrites
    # adata.obsm['X_umap'].
    if 'X_umap' in self.adata.obsm.keys():
        self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
    sc.tl.umap(self.adata, min_dist=0.1, copy=False)
def knn_avg(self, nnm=None):
    """Average expression over the k-nearest-neighbor graph.

    Divides the adjacency matrix by ``self.k`` (row normalization for a
    binary kNN graph) and multiplies it by the dispersion layer, storing
    the smoothed matrix in ``adata.layers['X_knn_avg']``.

    Parameters
    ----------
    nnm - scipy.sparse, optional, default None
        Cell-to-cell adjacency matrix. Defaults to the stored SAM
        nearest-neighbor graph in ``adata.uns['neighbors']``.
    """
    if nnm is None:
        nnm = self.adata.uns['neighbors']['connectivities']
    averaged = (nnm / self.k).dot(self.adata.layers['X_disp'])
    self.adata.layers['X_knn_avg'] = averaged
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
            edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
    """Display a scatter plot.

    Displays a scatter plot using the SAM projection or another input
    projection with or without annotations.

    Parameters
    ----------
    projection - ndarray of floats, optional, default None
        An N x 2 matrix, where N is the number of data points. If None,
        use an existing SAM projection (default t-SNE). Can take on values
        'umap' or 'tsne' to specify either the SAM UMAP embedding or
        SAM t-SNE embedding.
    c - ndarray or str, optional, default None
        Colors for each cell in the scatter plot. Can be a vector of
        floats or strings for cell annotations. Can also be a key
        for sam.adata.obs (i.e. 'louvain_clusters').
    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.
    cmap - string, optional, default 'rainbow'
        The colormap to use for the input color values.
    colorbar - bool, optional default True
        If True, display a colorbar indicating which values / annotations
        correspond to which color in the scatter plot.

    Keyword arguments -
        All other keyword arguments that can be passed into
        matplotlib.pyplot.scatter can be used.
    """
    # PLOTTING is a module-level flag — presumably set at import time
    # based on matplotlib availability; TODO confirm against file head.
    if (not PLOTTING):
        print("matplotlib not installed!")
    else:
        # Resolve the 2D coordinates: obsm key, stored embedding
        # (UMAP preferred, then t-SNE), or a raw N x 2 array.
        if(isinstance(projection, str)):
            try:
                dt = self.adata.obsm[projection]
            except KeyError:
                print('Please create a projection first using run_umap or'
                      'run_tsne')
        elif(projection is None):
            try:
                dt = self.adata.obsm['X_umap']
            except KeyError:
                try:
                    dt = self.adata.obsm['X_tsne']
                except KeyError:
                    print("Please create either a t-SNE or UMAP projection"
                          "first.")
                    return
        else:
            dt = projection
        if(axes is None):
            plt.figure()
            axes = plt.gca()
        if(c is None):
            # Uncolored scatter.
            plt.scatter(dt[:, 0], dt[:, 1], s=s,
                        linewidth=linewidth, edgecolor=edgecolor, **kwargs)
        else:
            # A string 'c' is treated as an adata.obs column name; if
            # missing, it is passed through to matplotlib unchanged.
            if isinstance(c, str):
                try:
                    c = self.adata.obs[c].get_values()
                except KeyError:
                    0  # do nothing
            if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
                    (isinstance(c, np.ndarray) or isinstance(c, list))):
                # Categorical string annotations: map to integer codes
                # and label the colorbar ticks with the originals.
                i = ut.convert_annotations(c)
                ui, ai = np.unique(i, return_index=True)
                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                    cbar.ax.set_yticklabels(c[ai])
            else:
                # Numeric color values (or a single matplotlib color,
                # in which case the colorbar is suppressed).
                if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                    colorbar = False
                i = c
                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
    """Display a gene's expressions.

    Displays a scatter plot using the SAM projection or another input
    projection with a particular gene's expressions overlaid.

    Parameters
    ----------
    gene - string
        a case-sensitive string indicating the gene expression pattern
        to display.
    avg - bool, optional, default True
        If True, the plots use the k-nearest-neighbor-averaged expression
        values to smooth out noisy expression patterns and improves
        visualization.
    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.
    **kwargs - all keyword arguments in 'SAM.scatter' are eligible.
    """
    all_gene_names = np.array(list(self.adata.var_names))
    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))
    idx = np.where(all_gene_names == gene)[0]
    name = gene
    if(idx.size == 0):
        # BUGFIX: message previously read "Gene note found"; corrected
        # to match the wording used by plot_correlated_genes.
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return
    if(avg):
        a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
        if a.sum() == 0:
            # Smoothed values are identically zero: fall back to the
            # raw (log2-scaled) expression of the retained cells.
            a = np.log2(self.adata_raw.X[np.in1d(
                all_cell_names, cell_names), :][:,
                idx].toarray().flatten() + 1)
    else:
        a = np.log2(self.adata_raw.X[np.in1d(
            all_cell_names, cell_names), :][:,
            idx].toarray().flatten() + 1)
    if axes is None:
        plt.figure()
        axes = plt.gca()
    self.scatter(c=a, axes=axes, **kwargs)
    axes.set_title(name)
def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
    """Cluster cells with DBSCAN, then assign each noise point to the
    cluster most represented among its k nearest non-noise neighbors.

    If X is None, clusters the stored UMAP embedding and saves the
    labels to adata.obs['density_clusters']; otherwise returns the
    label vector for the supplied coordinates.
    """
    from sklearn.cluster import DBSCAN
    if X is None:
        X = self.adata.obsm['X_umap']
        save = True
    else:
        save = False
    cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)
    # DBSCAN labels noise points as -1.
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        # Distances from clustered points to noise points; for each
        # noise point, take its self.k nearest clustered neighbors.
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # Vote: count neighbors per cluster and take the argmax.
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
        cl[idx1] = np.argmax(nnmc, axis=1)
    if save:
        self.adata.obs['density_clusters'] = pd.Categorical(cl)
    else:
        return cl
def louvain_clustering(self, X=None, res=1, method='modularity'):
    """Runs Louvain clustering using the vtraag implementation. Assumes
    that 'louvain' optional dependency is installed.

    Parameters
    ----------
    res - float, optional, default 1
        The resolution parameter which tunes the number of clusters Louvain
        finds.
    method - str, optional, default 'modularity'
        Can be 'modularity' or 'significance', which are two different
        optimizing funcitons in the Louvain algorithm.
    """
    if X is None:
        X = self.adata.uns['neighbors']['connectivities']
        save = True
    else:
        if not sp.isspmatrix_csr(X):
            X = sp.csr_matrix(X)
        save = False
    import igraph as ig
    import louvain
    # Shared-neighbor similarity (X X^T / k), re-sparsified to the top-k
    # entries per row via sparse_knn (module-level helper).
    adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
    sources, targets = adjacency.nonzero()
    weights = adjacency[sources, targets]
    if isinstance(weights, np.matrix):
        weights = weights.A1
    # Build a directed igraph graph from the adjacency edges.
    g = ig.Graph(directed=True)
    g.add_vertices(adjacency.shape[0])
    g.add_edges(list(zip(sources, targets)))
    try:
        g.es['weight'] = weights
    except BaseException:
        # Best effort: fall back to an unweighted graph if weights
        # cannot be assigned (e.g. no edges).
        pass
    if method == 'significance':
        cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
    else:
        cl = louvain.find_partition(
            g,
            louvain.RBConfigurationVertexPartition,
            resolution_parameter=res)
    if save:
        self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
    else:
        return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
    """Perform k-means clustering.

    Parameters
    ----------
    numc - int
        Number of clusters.
    X - array-like, optional, default None
        Data to cluster. If None, cluster the top-'npcs' principal
        coordinates of the processed SAM data and store the labels in
        adata.obs['kmeans_clusters']; otherwise return the labels.
    npcs - int, optional, default 15
        Number of principal components used as input for k-means
        clustering when X is None.
    """
    from sklearn.cluster import KMeans
    save = X is None
    if save:
        # Project the processed data onto the stored PCA components.
        processed = self.adata.uns['X_processed']
        centered = processed - processed.mean(0)
        X = centered.dot(
            self.adata.uns['pca_obj'].components_[:npcs, :].T)
    labels = KMeans(n_clusters=numc).fit_predict(
        Normalizer().fit_transform(X))
    if save:
        self.adata.obs['kmeans_clusters'] = pd.Categorical(labels)
    else:
        return labels
def leiden_clustering(self, X=None, res=1):
    """Run scanpy's Leiden clustering on the stored neighbor graph (or
    on the supplied adjacency matrix X) and store integer labels in
    adata.obs.
    """
    import scanpy.api as sc
    if X is None:
        sc.tl.leiden(self.adata, resolution=res,
                     key_added='leiden_clusters')
        # scanpy writes string categories; convert to integers.
        self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
            'leiden_clusters'].get_values().astype('int'))
    else:
        sc.tl.leiden(self.adata, resolution=res, adjacency=X,
                     key_added='leiden_clusters_X')
        self.adata.obs['leiden_clusters_X'] = pd.Categorical(self.adata.obs[
            'leiden_clusters_X'].get_values().astype('int'))
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    """Cluster cells with HDBSCAN, then assign each noise point to the
    cluster most represented among its k nearest non-noise neighbors.

    If X is None, clusters the L2-normalized projection of the
    processed data onto the first 15 stored PCA components and saves
    the labels to adata.obs['hdbknn_clusters']; otherwise returns the
    label vector for the supplied coordinates.
    """
    import hdbscan
    if X is None:
        #X = self.adata.obsm['X_pca']
        D = self.adata.uns['X_processed']
        X = (D - D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:, :15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False
    if k is None:
        k = 20  #self.k
    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
    cl = hdb.fit_predict(X)
    # HDBSCAN labels noise points as -1.
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        # Vote among each noise point's k nearest clustered neighbors.
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
        cl[idx1] = np.argmax(nnmc, axis=1)
    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
                             n_genes=4000):
    """
    Ranks marker genes for each cluster using a random forest
    classification approach.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    clusters - int or array-like, default None
        A number or vector corresponding to the specific cluster ID(s)
        for which marker genes will be calculated. If None, marker genes
        will be computed for all clusters.
    n_genes - int, optional, default 4000
        By default, trains the classifier on the top 4000 SAM-weighted
        genes.
    """
    # Resolve the label vector.
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels
    from sklearn.ensemble import RandomForestClassifier
    markers = {}
    # BUGFIX: was 'clusters == None', which raises ValueError when
    # 'clusters' is a numpy array (elementwise comparison has no truth
    # value); 'is None' matches the identity test used below.
    if clusters is None:
        lblsu = np.unique(lbls)
    else:
        lblsu = np.unique(clusters)
    # Train on the top-n_genes SAM-weighted genes.
    indices = np.argsort(-self.adata.var['weights'].values)
    X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
    for K in range(lblsu.size):
        print(K)
        # One-vs-rest target for cluster lblsu[K].
        y = np.zeros(lbls.size)
        y[lbls == lblsu[K]] = 1
        clf = RandomForestClassifier(n_estimators=100, max_depth=None,
                                     random_state=0)
        clf.fit(X, y)
        # Rank genes by feature importance.
        idx = np.argsort(-clf.feature_importances_)
        markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
    if clusters is None:
        self.adata.uns['marker_genes_rf'] = markers
    return markers
def identify_marker_genes_ratio(self, labels=None):
    """
    Rank marker genes for each cluster using a SAM-weighted
    expression-ratio score (works quite well).

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run;
        if a string, reads that column from adata.obs; otherwise used
        directly as a label vector.
    """
    # Resolve the label vector.
    if labels is None:
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels
    gene_names = np.array(list(self.adata.var_names))
    # Per-gene totals across all cells.
    totals = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
    gene_weights = self.adata.var['weights'].values
    markers = {}
    for cluster in np.unique(lbls):
        # Per-gene totals within the cluster.
        in_cluster = np.array(self.adata.layers['X_disp']
                              [lbls == cluster, :].sum(0)).flatten()
        # Weighted ratio score: (cluster sum)^2 / total sum * weight.
        score = np.zeros(in_cluster.size)
        expressed = totals > 0
        score[expressed] = in_cluster[expressed]**2 / \
            totals[expressed] * gene_weights[expressed]
        markers[cluster] = gene_names[np.argsort(-score)]
    self.adata.uns['marker_genes_ratio'] = markers
    return markers
def identify_marker_genes_corr(self, labels=None, n_genes=4000):
    """
    Ranking marker genes based on their respective magnitudes in the
    correlation dot products with cluster-specific reference expression
    profiles.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    n_genes - int, optional, default 4000
        By default, computes correlations on the top 4000 SAM-weighted genes.
    """
    # Resolve the label vector.
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels
    w = self.adata.var['weights'].values
    s = StandardScaler()
    # Standardize the top-n_genes columns and scale by the SAM weights.
    idxg = np.argsort(-w)[:n_genes]
    y1 = s.fit_transform(self.adata.layers['X_disp'][:, idxg].A) * w[idxg]
    all_gene_names = np.array(list(self.adata.var_names))[idxg]
    markers = {}
    lblsu = np.unique(lbls)
    for i in lblsu:
        Gcells = np.array(list(self.adata.obs_names[lbls == i]))
        z1 = y1[np.in1d(self.adata.obs_names, Gcells), :]
        # Per-cell z-scores and the cluster's standardized mean profile.
        m1 = (z1 - z1.mean(1)[:, None]) / z1.std(1)[:, None]
        ref = z1.mean(0)
        ref = (ref - ref.mean()) / ref.std()
        # Mean per-gene correlation-like product with the reference.
        g2 = (m1 * ref).mean(0)
        markers[i] = all_gene_names[np.argsort(-g2)]
    self.adata.uns['marker_genes_corr'] = markers
    return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.regress_genes | python | def regress_genes(self, PCs):
ind = [PCs]
ind = np.array(ind).flatten()
try:
y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
self.regression_pca.components_[ind, :] * self.adata.var[
'weights'].values)
except BaseException:
y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
self.regression_pca.components_[ind, :])
self.adata.X = sp.csr_matrix(y) | Regress out the principal components in 'PCs' from the filtered
expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
been previously called.
Parameters
----------
PCs - int, numpy.array, list
The principal components to regress out of the expression data. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L588-L610 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
    # Build adata_raw from whichever input form was supplied:
    # (matrix, gene IDs, cell IDs) tuple/list, DataFrame, or AnnData.
    if isinstance(counts, tuple) or isinstance(counts, list):
        raw_data, all_gene_names, all_cell_names = counts
        if isinstance(raw_data, np.ndarray):
            raw_data = sp.csr_matrix(raw_data)
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, pd.DataFrame):
        # DataFrame is (cells x genes): rows are cells, columns genes.
        raw_data = sp.csr_matrix(counts.values)
        all_gene_names = np.array(list(counts.columns.values))
        all_cell_names = np.array(list(counts.index.values))
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, AnnData):
        all_cell_names = np.array(list(counts.obs_names))
        all_gene_names = np.array(list(counts.var_names))
        self.adata_raw = counts
    elif counts is not None:
        raise Exception(
            "\'counts\' must be either a tuple/list of "
            "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
            "cells x genes")
    if(annotations is not None):
        annotations = np.array(list(annotations))
        if counts is not None:
            self.adata_raw.obs['annotations'] = pd.Categorical(annotations)
    if(counts is not None):
        # Deduplicate gene/cell names if needed, then initialize the
        # working copy and its dispersion layer.
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                    include_genes=None, exclude_genes=None,
                    include_cells=None, exclude_cells=None,
                    norm='log', min_expression=1, thresh=0.01,
                    filter_genes=True):
    """Log-normalizes and filters the expression data.

    Parameters
    ----------
    div : float, optional, default 1
        The factor by which the gene expression will be divided prior to
        log normalization.
    downsample : float, optional, default 0
        The factor by which to randomly downsample the data. If 0, the
        data will not be downsampled.
    sum_norm : str or float, optional, default None
        If a float, the total number of transcripts in each cell will be
        normalized to this value prior to normalization and filtering.
        Otherwise, nothing happens. If 'cell_median', each cell is
        normalized to have the median total read count per cell. If
        'gene_median', each gene is normalized to have the median total
        read count per gene.
    norm : str, optional, default 'log'
        If 'log', log-normalizes the expression data. If 'ftt', applies the
        Freeman-Tukey variance-stabilization transformation. If
        'multinomial', applies the Pearson-residual transformation (this is
        experimental and should only be used for raw, un-normalized UMI
        datasets). If None, the data is not normalized.
    include_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to keep.
        All other genes will be filtered out. Gene names are case-
        sensitive.
    exclude_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to
        exclude. These genes will be filtered out. Gene names are case-
        sensitive.
    include_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to keep.
        All other cells will be filtered out. Cell names are
        case-sensitive.
    exclude_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to exclude.
        Thses cells will be filtered out. Cell names are
        case-sensitive.
    min_expression : float, optional, default 1
        The threshold above which a gene is considered
        expressed. Gene expression values less than 'min_expression' are
        set to zero.
    thresh : float, optional, default 0.01
        Keep genes expressed in greater than 'thresh'*100 % of cells and
        less than (1-'thresh')*100 % of cells, where a gene is considered
        expressed if its expression value exceeds 'min_expression'.
    filter_genes : bool, optional, default True
        Setting this to False turns off filtering operations aside from
        removing genes with zero expression across all cells. Genes passed
        in exclude_genes or not passed in include_genes will still be
        filtered.
    """
    # load data
    try:
        D = self.adata_raw.X
        self.adata = self.adata_raw.copy()
    except AttributeError:
        print('No data loaded')
    # filter cells
    # idx_cells accumulates the intersection of all cell-level filters.
    cell_names = np.array(list(self.adata_raw.obs_names))
    idx_cells = np.arange(D.shape[0])
    if(include_cells is not None):
        include_cells = np.array(list(include_cells))
        idx2 = np.where(np.in1d(cell_names, include_cells))[0]
        idx_cells = np.array(list(set(idx2) & set(idx_cells)))
    if(exclude_cells is not None):
        exclude_cells = np.array(list(exclude_cells))
        idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                invert=True))[0]
        idx_cells = np.array(list(set(idx4) & set(idx_cells)))
    if downsample > 0:
        # Randomly keep 1/downsample of the cells.
        numcells = int(D.shape[0] / downsample)
        rand_ind = np.random.choice(np.arange(D.shape[0]),
                                    size=numcells, replace=False)
        idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
    else:
        numcells = D.shape[0]
    mask_cells = np.zeros(D.shape[0], dtype='bool')
    mask_cells[idx_cells] = True
    self.adata = self.adata_raw[mask_cells, :].copy()
    D = self.adata.X
    # Ensure a float32 CSR matrix with sorted indices.
    if isinstance(D, np.ndarray):
        D = sp.csr_matrix(D, dtype='float32')
    else:
        D = D.astype('float32')
    D.sort_indices()
    if(D.getformat() == 'csc'):
        D = D.tocsr();
    # sum-normalize
    if (sum_norm == 'cell_median' and norm != 'multinomial'):
        # Scale each cell to the median total count per cell.
        s = D.sum(1).A.flatten()
        sum_norm = np.median(s)
        D = D.multiply(1 / s[:, None] * sum_norm).tocsr()
    elif (sum_norm == 'gene_median' and norm != 'multinomial'):
        # Scale each gene to the median total count per gene.
        s = D.sum(0).A.flatten()
        sum_norm = np.median(s)
        s[s == 0] = 1
        D = D.multiply(1 / s[None, :] * sum_norm).tocsr()
    elif sum_norm is not None and norm != 'multinomial':
        # Scale each cell to a fixed total count.
        D = D.multiply(1 / D.sum(1).A.flatten()[:,
                       None] * sum_norm).tocsr()
    # normalize
    self.adata.X = D
    if norm is None:
        D.data[:] = (D.data / div)
    elif(norm.lower() == 'log'):
        D.data[:] = np.log2(D.data / div + 1)
    elif(norm.lower() == 'ftt'):
        # Freeman-Tukey: sqrt(x) + sqrt(x + 1).
        D.data[:] = np.sqrt(D.data / div) + np.sqrt(D.data / div + 1)
    elif norm.lower() == 'multinomial':
        # Pearson residuals: (x - mu) / sqrt(mu - mu^2 / n_i) with
        # mu_ij = n_i * p_j (cell totals x gene proportions).
        ni = D.sum(1).A.flatten()  # cells
        pj = (D.sum(0) / D.sum()).A.flatten()  # genes
        col = D.indices
        # Recover the row index of every stored nonzero from indptr.
        row = []
        for i in range(D.shape[0]):
            row.append(i * np.ones(D.indptr[i + 1] - D.indptr[i]))
        row = np.concatenate(row).astype('int32')
        mu = sp.coo_matrix((ni[row] * pj[col], (row, col))).tocsr()
        mu2 = mu.copy()
        mu2.data[:] = mu2.data**2
        mu2 = mu2.multiply(1 / ni[:, None])
        mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
        self.adata.X = mu
        # D still feeds the dispersion layer: median-sum-normalize and
        # log-scale it.
        if sum_norm is None:
            sum_norm = np.median(ni)
        D = D.multiply(1 / ni[:, None] * sum_norm).tocsr()
        D.data[:] = np.log2(D.data / div + 1)
    else:
        D.data[:] = (D.data / div)
    # zero-out low-expressed genes
    idx = np.where(D.data <= min_expression)[0]
    D.data[idx] = 0
    # filter genes
    # idx_genes accumulates the intersection of all gene-level filters.
    gene_names = np.array(list(self.adata.var_names))
    idx_genes = np.arange(D.shape[1])
    if(include_genes is not None):
        include_genes = np.array(list(include_genes))
        idx = np.where(np.in1d(gene_names, include_genes))[0]
        idx_genes = np.array(list(set(idx) & set(idx_genes)))
    if(exclude_genes is not None):
        exclude_genes = np.array(list(exclude_genes))
        idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                invert=True))[0]
        idx_genes = np.array(list(set(idx3) & set(idx_genes)))
    if(filter_genes):
        # Count expressing cells per gene and keep genes expressed in a
        # fraction of cells within (thresh, 1 - thresh].
        a, ct = np.unique(D.indices, return_counts=True)
        c = np.zeros(D.shape[1])
        c[a] = ct
        keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                       c / D.shape[0] <= 1 - thresh))[0]
        idx_genes = np.array(list(set(keep) & set(idx_genes)))
    mask_genes = np.zeros(D.shape[1], dtype='bool')
    mask_genes[idx_genes] = True
    # Zero out filtered genes rather than dropping columns.
    self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
    self.adata.X.eliminate_zeros()
    self.adata.var['mask_genes'] = mask_genes
    if norm == 'multinomial':
        # For multinomial, X holds residuals, so the dispersion layer
        # comes from the log-scaled D instead.
        self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
        self.adata.layers['X_disp'].eliminate_zeros()
    else:
        self.adata.layers['X_disp'] = self.adata.X
def load_data(self, filename, transpose=True,
              save_sparse_file='h5ad', sep=',', **kwargs):
    """Loads the specified data file. The file can be a table of
    read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
    as columns by default. The file can also be a pickle file (output from
    'save_sparse_data') or an h5ad file (output from 'save_anndata').

    This function that loads the file specified by 'filename'.

    Parameters
    ----------
    filename - string
        The path to the tabular raw expression counts file.

    sep - string, optional, default ','
        The delimeter used to read the input data table. By default
        assumes the input table is delimited by commas.

    save_sparse_file - str, optional, default 'h5ad'
        If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
        (the native AnnData file format) to the same folder as the original
        data for faster loading in the future. If 'p', pickles the sparse
        data structure, cell names, and gene names in the same folder as
        the original data for faster loading in the future.

    transpose - bool, optional, default True
        By default, assumes file is (genes x cells). Set this to False if
        the file has dimensions (cells x genes).
    """
    # Dispatch on file extension: '.p' is a pickled (matrix, cells, genes)
    # tuple, '.h5ad' is a native AnnData file, anything else is treated as
    # a delimited text table.
    if filename.split('.')[-1] == 'p':
        # NOTE(review): the file handle given to pickle.load is never
        # closed explicitly; relies on GC -- confirm this is acceptable.
        raw_data, all_cell_names, all_gene_names = (
            pickle.load(open(filename, 'rb')))
        if(transpose):
            raw_data = raw_data.T
        if raw_data.getformat()=='csc':
            print("Converting sparse matrix to csr format...")
            raw_data=raw_data.tocsr()
        # Already a fast-loading format; don't re-save below.
        save_sparse_file = None
    elif filename.split('.')[-1] != 'h5ad':
        df = pd.read_csv(filename, sep=sep, index_col=0)
        if(transpose):
            dataset = df.T
        else:
            dataset = df
        raw_data = sp.csr_matrix(dataset.values)
        all_cell_names = np.array(list(dataset.index.values))
        all_gene_names = np.array(list(dataset.columns.values))

    if filename.split('.')[-1] != 'h5ad':
        self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                 var={'var_names': all_gene_names})
        # De-duplicate gene/cell names only when duplicates exist.
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()

        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = raw_data
    else:
        self.adata_raw = anndata.read_h5ad(filename, **kwargs)
        self.adata = self.adata_raw.copy()
        if 'X_disp' not in list(self.adata.layers.keys()):
            self.adata.layers['X_disp'] = self.adata.X
        # h5ad is already the fast format; don't re-save.
        save_sparse_file = None

    if(save_sparse_file == 'p'):
        # Write '<name>_sparse.p' next to the input file for fast reloads.
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_sparse_data(path + new_sparse_file + '_sparse.p')
    elif(save_sparse_file == 'h5ad'):
        # Write '<name>_SAM.h5ad' next to the input file for fast reloads.
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
    """Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
    Pickle file.

    Parameters
    ----------
    fname - string
        The filename of the output file.
    """
    # Stored genes-x-cells in CSC format (the layout load_data expects).
    data = self.adata_raw.X.T
    if data.getformat() == 'csr':
        data = data.tocsc()

    cell_names = np.array(list(self.adata_raw.obs_names))
    gene_names = np.array(list(self.adata_raw.var_names))
    # Fix: the original passed open(fname, 'wb') directly to pickle.dump
    # and never closed the handle; use a context manager so the file is
    # always flushed and released.
    with open(fname, 'wb') as f:
        pickle.dump((data, cell_names, gene_names), f)
def save_anndata(self, fname, data='adata_raw', **kwargs):
    """Writes an AnnData attribute of this object to a .h5ad file
    (AnnData's native format).

    Parameters
    ----------
    fname - string
        The filename of the output file.

    data - string, optional, default 'adata_raw'
        Name of the instance attribute to serialize (looked up in
        this object's instance dictionary).
    """
    # Instance-dict lookup (raises KeyError for unknown names, matching
    # the original contract); any extra kwargs go to write_h5ad.
    self.__dict__[data].write_h5ad(fname, **kwargs)
def load_annotations(self, aname, sep=','):
    """Loads cell annotations.

    Loads the cell annotations specified by the 'aname' path.

    Parameters
    ----------
    aname - string
        The path to the annotations file. First column should be cell IDs
        and second column should be the desired annotations.
    """
    ann = pd.read_csv(aname)

    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))

    # Heuristically re-read the file until its shape matches the data:
    # first try (index column + annotation column), then fall back to a
    # header-less layout whenever the row count disagrees with the number
    # of cells in adata_raw.
    if(ann.shape[1] > 1):
        ann = pd.read_csv(aname, index_col=0, sep=sep)
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
    else:
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, header=None, sep=sep)

    # Coerce the index to strings so it can be matched against obs_names.
    ann.index = np.array(list(ann.index.astype('<U100')))

    # ann1: annotations restricted/reordered to the filtered cells;
    # ann2: annotations for every cell in the raw data.
    ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
    ann2 = np.array(list(ann.values.flatten()))
    self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
    self.adata.obs['annotations'] = pd.Categorical(ann1)
def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
    """Computes the spatial dispersion factors for each gene.

    Parameters
    ----------
    nnm - scipy.sparse, float
        Square cell-to-cell nearest-neighbor matrix.

    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This ensures
        that outlier genes do not significantly skew the weight
        distribution.

    Returns:
    -------
    weights - ndarray, float
        The vector of gene weights (also stored in
        self.adata.var['weights']). Note: unlike what an earlier
        docstring claimed, the sorted indices are NOT returned.
    """
    # Smooth expression over each cell's neighborhood first; the
    # dispersions are computed on the averaged matrix ('X_knn_avg').
    self.knn_avg(nnm)

    D_avg = self.adata.layers['X_knn_avg']

    # Per-gene Fano factor (variance / mean); left at 0 where mean == 0.
    mu, var = sf.mean_variance_axis(D_avg, axis=0)

    dispersions = np.zeros(var.size)
    dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]

    self.adata.var['spatial_dispersions'] = dispersions.copy()

    # Clip at the mean of the top 'num_norm_avg' dispersions so a few
    # outlier genes cannot dominate the weight normalization.
    ma = np.sort(dispersions)[-num_norm_avg:].mean()
    dispersions[dispersions >= ma] = ma

    weights = ((dispersions / dispersions.max())**0.5).flatten()

    self.adata.var['weights'] = weights
    return weights
def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
    """Computes the contribution of the gene IDs in 'genes' to each
    principal component (PC) of the filtered expression data as the mean of
    the absolute value of the corresponding gene loadings. High values
    correspond to PCs that are highly correlated with the features in
    'genes'. These PCs can then be regressed out of the data using
    'regress_genes'.

    Parameters
    ----------
    genes - numpy.array or list
        Genes for which contribution to each PC will be calculated.

    npcs - int, optional, default None
        How many PCs to calculate when computing PCA of the filtered and
        log-transformed expression data. If None, calculate all PCs.

    plot - bool, optional, default False
        If True, plot the scores reflecting how correlated each PC is with
        genes of interest. Otherwise, do not plot anything.

    Returns:
    -------
    x - numpy.array
        Scores reflecting how correlated each PC is with the genes of
        interest (ordered by decreasing eigenvalues). Returns None when
        'genes' is None; the fitted PCA and cell scores are still stored
        on the object in either case.
    """
    from sklearn.decomposition import PCA
    # "All PCs" == one per cell (n_components capped by n_samples).
    if npcs is None:
        npcs = self.adata.X.shape[0]

    pca = PCA(n_components=npcs)
    pc = pca.fit_transform(self.adata.X.toarray())

    # Cache the fitted PCA and cell scores for later use (regress_genes).
    self.regression_pca = pca
    self.regression_pcs = pc

    gene_names = np.array(list(self.adata.var_names))
    if(genes is not None):
        idx = np.where(np.in1d(gene_names, genes))[0]
        sx = pca.components_[:, idx]
        # Mean absolute loading of the selected genes on each PC.
        x = np.abs(sx).mean(1)

        if plot:
            plt.figure()
            plt.plot(x)

        return x
    else:
        return
def run(self,
        max_iter=10,
        verbose=True,
        projection='umap',
        stopping_condition=5e-3,
        num_norm_avg=50,
        k=20,
        distance='correlation',
        preprocessing='Normalizer',
        proj_kwargs=None):
    """Runs the Self-Assembling Manifold algorithm.

    Parameters
    ----------
    k - int, optional, default 20
        The number of nearest neighbors to identify for each cell.

    distance : string, optional, default 'correlation'
        The distance metric to use when constructing cell distance
        matrices. Can be any of the distance metrics supported by
        sklearn's 'pdist'.

    max_iter - int, optional, default 10
        The maximum number of iterations SAM will run.

    stopping_condition - float, optional, default 5e-3
        The stopping condition threshold for the RMSE between gene weights
        in adjacent iterations.

    verbose - bool, optional, default True
        If True, the iteration number and error between gene weights in
        adjacent iterations will be displayed.

    projection - str, optional, default 'umap'
        If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
        embedding. Otherwise, no embedding will be generated.

    preprocessing - str, optional, default 'Normalizer'
        If 'Normalizer', use sklearn.preprocessing.Normalizer, which
        normalizes expression data prior to PCA such that each cell has
        unit L2 norm. If 'StandardScaler', use
        sklearn.preprocessing.StandardScaler, which normalizes expression
        data prior to PCA such that each gene has zero mean and unit
        variance. Otherwise, do not normalize the expression data. We
        recommend using 'StandardScaler' for large datasets and
        'Normalizer' otherwise.

    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This prevents
        genes with large spatial dispersions from skewing the distribution
        of weights.

    proj_kwargs - dict, optional, default None
        A dictionary of keyword arguments to pass to the projection
        functions. Defaults to an empty dict.
    """
    # Fix: the original used a mutable default argument (proj_kwargs={});
    # use the None sentinel instead.
    if proj_kwargs is None:
        proj_kwargs = {}

    self.distance = distance
    D = self.adata.X
    self.k = k
    # Clamp k to [5, 100] and keep it below the number of cells.
    if(self.k < 5):
        self.k = 5
    elif(self.k > 100):
        self.k = 100

    if(self.k > D.shape[0] - 1):
        self.k = D.shape[0] - 2

    numcells = D.shape[0]

    # Cap the number of genes used per iteration by dataset size.
    n_genes = 8000
    if numcells > 3000 and n_genes > 3000:
        n_genes = 3000
    elif numcells > 2000 and n_genes > 4500:
        n_genes = 4500
    elif numcells > 1000 and n_genes > 6000:
        n_genes = 6000
    elif n_genes > 8000:
        n_genes = 8000

    # More cells -> fewer principal components retained.
    npcs = None
    if npcs is None and numcells > 3000:
        npcs = 150
    elif npcs is None and numcells > 2000:
        npcs = 250
    elif npcs is None and numcells > 1000:
        npcs = 350
    elif npcs is None:
        npcs = 500

    tinit = time.time()

    # Seed with a random k-NN graph (each cell always includes itself)
    # to bootstrap the first round of gene weights.
    edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
    nums = np.arange(edm.shape[1])
    RINDS = np.random.randint(
        0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                       (self.k - 1)))
    RINDS = np.hstack((nums[:, None], RINDS))

    edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
    edm = edm.tocsr()

    print('RUNNING SAM')

    # Initial weights from the random graph (num_norm_avg=1 here).
    W = self.dispersion_ranking_NN(
        edm, num_norm_avg=1)

    old = np.zeros(W.size)
    new = W

    i = 0
    err = ((new - old)**2).mean()**0.5

    if max_iter < 5:
        max_iter = 5

    nnas = num_norm_avg

    # Keep every iteration's graph and weights for later inspection.
    self.Ns = [edm]
    self.Ws = [W]

    # Iterate until the RMSE between consecutive weight vectors drops
    # below the stopping condition (or max_iter is reached).
    while (i < max_iter and err > stopping_condition):
        conv = err
        if(verbose):
            print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))

        i += 1
        old = new

        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        new = W
        err = ((new - old)**2).mean()**0.5

        self.Ns.append(EDM)
        self.Ws.append(W)

    # One final pass with the converged weights.
    W, wPCA_data, EDM, = self.calculate_nnm(
        D, W, n_genes, preprocessing, npcs, numcells, nnas)
    self.Ns.append(EDM)

    all_gene_names = np.array(list(self.adata.var_names))
    indices = np.argsort(-W)
    ranked_genes = all_gene_names[indices]

    # Populate adata.uns['gene_groups'] as a side effect.
    self.corr_bin_genes(number_of_features=1000)

    self.adata.uns['ranked_genes'] = ranked_genes
    self.adata.obsm['X_pca'] = wPCA_data
    self.adata.uns['neighbors'] = {}
    self.adata.uns['neighbors']['connectivities'] = EDM

    if(projection == 'tsne'):
        print('Computing the t-SNE embedding...')
        self.run_tsne(**proj_kwargs)
    elif(projection == 'umap'):
        print('Computing the UMAP embedding...')
        self.run_umap(**proj_kwargs)
    elif(projection == 'diff_umap'):
        print('Computing the diffusion UMAP embedding...')
        self.run_diff_umap(**proj_kwargs)

    elapsed = time.time() - tinit

    if verbose:
        print('Elapsed time: ' + str(elapsed) + ' seconds')
def calculate_nnm(
        self,
        D,
        W,
        n_genes,
        preprocessing,
        npcs,
        numcells,
        num_norm_avg):
    """One SAM iteration: select the top 'n_genes' weighted genes,
    preprocess, run a weighted PCA, build the k-nearest-neighbor graph,
    and recompute the gene weights from it.

    Returns (W, g_weighted, EDM): the new weight vector, the weighted
    PCA coordinates, and the nearest-neighbor adjacency matrix (CSR).
    """
    if(n_genes is None):
        gkeep = np.arange(W.size)
    else:
        # Top-weighted genes, kept in their original column order.
        gkeep = np.sort(np.argsort(-W)[:n_genes])

    if preprocessing == 'Normalizer':
        Ds = D[:, gkeep].toarray()
        Ds = Normalizer().fit_transform(Ds)

    elif preprocessing == 'StandardScaler':
        Ds = D[:, gkeep].toarray()
        Ds = StandardScaler(with_mean=True).fit_transform(Ds)
        # Clamp extreme z-scores to limit outlier influence.
        Ds[Ds > 5] = 5
        Ds[Ds < -5] = -5

    else:
        Ds = D[:, gkeep].toarray()

    # Scale each gene column by its SAM weight before PCA.
    D_sub = Ds * (W[gkeep])

    # Randomized ('auto') solver is faster for many cells; exact ('full')
    # solver for small datasets.
    if numcells > 500:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='auto')
    else:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='full')

    if self.distance == 'euclidean':
        # Unit-normalize rows so euclidean distance behaves like cosine.
        g_weighted = Normalizer().fit_transform(g_weighted)

    self.adata.uns['pca_obj'] = pca
    EDM = self.calc_nnm(g_weighted)

    W = self.dispersion_ranking_NN(
        EDM, num_norm_avg=num_norm_avg)

    self.adata.uns['X_processed'] = D_sub

    return W, g_weighted, EDM
def calc_nnm(self, g_weighted):
    """Build a sparse directed k-nearest-neighbor adjacency matrix for
    the cells in 'g_weighted' (cells x components).

    Uses an approximate neighbor search for large datasets (> 8000
    cells) and an exact pairwise distance matrix otherwise. Returns a
    CSR matrix with 1 in entry (i, j) when cell j is among cell i's
    self.k nearest neighbors.
    """
    n_cells = g_weighted.shape[0]
    if n_cells > 8000:
        # Approximate search; only the neighbor indices are needed.
        knn_idx, _ = ut.nearest_neighbors(
            g_weighted, n_neighbors=self.k, metric=self.distance)
        row_idx = np.repeat(np.arange(knn_idx.shape[0]), knn_idx.shape[1])
        adjacency = sp.coo_matrix((n_cells, n_cells), dtype='i').tolil()
        adjacency[row_idx, knn_idx.flatten()] = 1
        adjacency = adjacency.tocsr()
    else:
        # Exact pairwise distances are affordable at this size.
        pairwise = ut.compute_distances(g_weighted, self.distance)
        adjacency = sp.csr_matrix(ut.dist_to_nn(pairwise, self.k))
    return adjacency
def _create_dict(self, exc):
    """Snapshot self.__dict__ into self.pickle_dict, omitting the
    attribute names listed in 'exc'.

    Parameters
    ----------
    exc - list of str or None
        Attribute names to exclude from the snapshot. Names not present
        are silently ignored.
    """
    self.pickle_dict = self.__dict__.copy()
    if(exc):
        for key in exc:
            # Fix: deleting a missing dict key raises KeyError; the
            # original caught NameError, which 'del d[k]' never raises,
            # so a missing excluded key would crash.
            try:
                del self.pickle_dict[key]
            except KeyError:
                pass
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
    """Plots orthogonal expression patterns.

    In the default mode, plots orthogonal gene expression patterns. A
    specific correlated group of genes can be specified to plot gene
    expression patterns within that group.

    Parameters
    ----------
    group - int, optional, default None
        If specified, display the genes within the desired correlated
        group. Otherwise, display the top ranked gene within each distinct
        correlated group.

    n_genes - int, optional, default 5
        The number of top ranked genes to display within a correlated
        group if 'group' is specified.

    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    # Gene groups are produced by corr_bin_genes (called from run()).
    geneID_groups = self.adata.uns['gene_groups']
    if(group is None):
        # One plot per group: its top-ranked gene.
        for i in range(len(geneID_groups)):
            self.show_gene_expression(geneID_groups[i][0], **kwargs)
    else:
        # Top 'n_genes' genes of the requested group.
        for i in range(n_genes):
            self.show_gene_expression(geneID_groups[group][i], **kwargs)
def plot_correlated_genes(
        self,
        name,
        n_genes=5,
        number_of_features=1000,
        **kwargs):
    """Plots gene expression patterns correlated with the input gene.

    Parameters
    ----------
    name - string
        The name of the gene with respect to which correlated gene
        expression patterns will be displayed.

    n_genes - int, optional, default 5
        The number of top ranked correlated genes to display.

    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.

    Returns:
    -------
    The names of the correlated genes that were plotted (excluding the
    query gene itself), or None if 'name' is not in the dataset.
    """
    all_gene_names = np.array(list(self.adata.var_names))
    if((all_gene_names == name).sum() == 0):
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return
    # Genes sharing a correlation bin with 'name'.
    sds = self.corr_bin_genes(
        input_gene=name,
        number_of_features=number_of_features)
    # Element 0 is the query gene; plot up to n_genes of the others.
    if (n_genes + 1 > sds.size):
        x = sds.size
    else:
        x = n_genes + 1

    for i in range(1, x):
        self.show_gene_expression(sds[i], **kwargs)
    return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
    """A (hacky) method for binning groups of genes correlated along the
    SAM manifold.

    Parameters
    ----------
    number_of_features - int, optional, default None
        The number of genes to bin. Capped at 1000 due to memory
        considerations.

    input_gene - str, optional, default None
        If not None, use this gene as the first seed when growing the
        correlation bins, and return only the bin containing it.
        Otherwise, all bins are computed, stored in
        adata.uns['gene_groups'], and returned.
    """
    weights = self.adata.var['spatial_dispersions'].values
    all_gene_names = np.array(list(self.adata.var_names))

    D_avg = self.adata.layers['X_knn_avg']

    # Consider only genes with positive dispersion, highest first.
    idx2 = np.argsort(-weights)[:weights[weights > 0].size]

    if(number_of_features is None or number_of_features > idx2.size):
        number_of_features = idx2.size
    if number_of_features > 1000:
        number_of_features = 1000

    def _grow_seeds(seeds):
        # Greedy agglomeration (previously duplicated verbatim in both
        # branches below): each gene joins the existing seed group whose
        # representative it correlates with most (above the gene's mean
        # positive correlation), otherwise it starts a new group.
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                   > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        return seeds

    if(input_gene is not None):
        input_gene = np.where(all_gene_names == input_gene)[0]
        if(input_gene.size == 0):
            # Fix: message typo "note found" -> "not found" (matches the
            # wording used in plot_correlated_genes).
            print(
                "Gene not found in the filtered dataset. Note "
                "that genes are case sensitive.")
            return
        seeds = _grow_seeds([np.array([input_gene])])
        geneID_groups = [all_gene_names[s] for s in seeds]
        # Only the group containing the query gene is returned.
        return geneID_groups[0]
    else:
        seeds = _grow_seeds([np.array([idx2[0]])])
        geneID_groups = [all_gene_names[s] for s in seeds]
        self.adata.uns['gene_groups'] = geneID_groups
        return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
    """Wrapper for sklearn's t-SNE implementation.

    With X given, fits t-SNE on X using 'metric' and returns the
    embedding. With X omitted, fits t-SNE on the stored SAM PCA
    coordinates using self.distance as the metric and stores the result
    in adata.obsm['X_tsne'] (nothing is returned in that case).
    """
    if X is not None:
        return man.TSNE(metric=metric, **kwargs).fit_transform(X)
    embedding = man.TSNE(metric=self.distance,
                         **kwargs).fit_transform(self.adata.obsm['X_pca'])
    self.adata.obsm['X_tsne'] = embedding
def run_umap(self, X=None, metric=None, **kwargs):
    """Wrapper for umap-learn.

    With X given, fits UMAP on X and returns the embedding. With X
    omitted, fits UMAP on the stored SAM PCA coordinates and stores the
    result in adata.obsm['X_umap']. The metric defaults to
    self.distance when not provided.

    See https://github.com/lmcinnes/umap for the documentation
    and source code.
    """
    import umap as umap

    if metric is None:
        metric = self.distance

    reducer = umap.UMAP(metric=metric, **kwargs)
    if X is not None:
        return reducer.fit_transform(X)
    self.adata.obsm['X_umap'] = reducer.fit_transform(
        self.adata.obsm['X_pca'])
def run_diff_umap(self, use_rep='X_pca', metric='euclidean', n_comps=15,
                  method='gauss', **kwargs):
    """Experimental -- running UMAP on the diffusion components.

    Builds a neighbor graph on 'use_rep', computes 'n_comps' diffusion
    components, rebuilds the neighbor graph in diffusion space, and runs
    scanpy's UMAP on it. Any existing 'X_umap' embedding is preserved
    under 'X_umap_sam' before being overwritten.

    NOTE(review): the 'metric' and '**kwargs' parameters are currently
    unused (self.distance is used for the first graph and 'euclidean'
    is hard-coded for the second) -- confirm this is intended.
    """
    import scanpy.api as sc
    sc.pp.neighbors(self.adata, use_rep=use_rep, n_neighbors=self.k,
                    metric=self.distance, method=method)

    sc.tl.diffmap(self.adata, n_comps=n_comps)
    sc.pp.neighbors(self.adata, use_rep='X_diffmap', n_neighbors=self.k,
                    metric='euclidean', method=method)

    # Keep the original SAM UMAP embedding before scanpy overwrites it.
    if 'X_umap' in self.adata.obsm.keys():
        self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']

    sc.tl.umap(self.adata, min_dist=0.1, copy=False)
def knn_avg(self, nnm=None):
    """Average the displayed expression ('X_disp') over each cell's k
    nearest neighbors and store the result in layers['X_knn_avg'].

    Parameters
    ----------
    nnm - scipy.sparse / array, optional, default None
        Cell-to-cell nearest-neighbor matrix. Defaults to the stored
        neighbor graph in adata.uns['neighbors']['connectivities'].
    """
    if nnm is None:
        nnm = self.adata.uns['neighbors']['connectivities']
    # Each neighbor contributes 1/k; rows with k neighbors give a mean.
    averaged = (nnm / self.k).dot(self.adata.layers['X_disp'])
    self.adata.layers['X_knn_avg'] = averaged
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
            edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
    """Display a scatter plot.

    Displays a scatter plot using the SAM projection or another input
    projection with or without annotations.

    Parameters
    ----------
    projection - ndarray of floats, optional, default None
        An N x 2 matrix, where N is the number of data points. If None,
        use an existing SAM projection (default t-SNE). Can take on values
        'umap' or 'tsne' to specify either the SAM UMAP embedding or
        SAM t-SNE embedding.

    c - ndarray or str, optional, default None
        Colors for each cell in the scatter plot. Can be a vector of
        floats or strings for cell annotations. Can also be a key
        for sam.adata.obs (i.e. 'louvain_clusters').

    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.

    cmap - string, optional, default 'rainbow'
        The colormap to use for the input color values.

    colorbar - bool, optional default True
        If True, display a colorbar indicating which values / annotations
        correspond to which color in the scatter plot.

    Keyword arguments -
        All other keyword arguments that can be passed into
        matplotlib.pyplot.scatter can be used.
    """
    if (not PLOTTING):
        print("matplotlib not installed!")
    else:
        # Resolve the 2D coordinates: named obsm key, stored embedding
        # (UMAP preferred over t-SNE), or an explicit array.
        if(isinstance(projection, str)):
            try:
                dt = self.adata.obsm[projection]
            except KeyError:
                # NOTE(review): adjacent string literals concatenate
                # without a space in this message.
                print('Please create a projection first using run_umap or'
                      'run_tsne')
        elif(projection is None):
            try:
                dt = self.adata.obsm['X_umap']
            except KeyError:
                try:
                    dt = self.adata.obsm['X_tsne']
                except KeyError:
                    # NOTE(review): missing space between concatenated
                    # string literals here as well.
                    print("Please create either a t-SNE or UMAP projection"
                          "first.")
                    return
        else:
            dt = projection

        if(axes is None):
            plt.figure()
            axes = plt.gca()

        if(c is None):
            plt.scatter(dt[:, 0], dt[:, 1], s=s,
                        linewidth=linewidth, edgecolor=edgecolor, **kwargs)
        else:
            # 'c' may name a column in adata.obs; silently keep the
            # string if the key does not exist.
            if isinstance(c, str):
                try:
                    c = self.adata.obs[c].get_values()
                except KeyError:
                    0  # do nothing

            # String annotations: map to integer codes for coloring and
            # label the colorbar ticks with the original strings.
            if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
               (isinstance(c, np.ndarray) or isinstance(c, list))):
                i = ut.convert_annotations(c)
                ui, ai = np.unique(i, return_index=True)

                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)

                if(colorbar):
                    cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                    cbar.ax.set_yticklabels(c[ai])
            else:
                # Numeric color vector (a scalar disables the colorbar).
                if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                    colorbar = False
                i = c

                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)

                if(colorbar):
                    plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
    """Display a gene's expressions.

    Displays a scatter plot using the SAM projection or another input
    projection with a particular gene's expressions overlaid.

    Parameters
    ----------
    gene - string
        a case-sensitive string indicating the gene expression pattern
        to display.

    avg - bool, optional, default True
        If True, the plots use the k-nearest-neighbor-averaged expression
        values to smooth out noisy expression patterns and improves
        visualization.

    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.

    **kwargs - all keyword arguments in 'SAM.scatter' are eligible.
    """
    all_gene_names = np.array(list(self.adata.var_names))
    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))

    idx = np.where(all_gene_names == gene)[0]
    name = gene
    if(idx.size == 0):
        # Fix: message typo "note found" -> "not found" (matches the
        # wording used in plot_correlated_genes).
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return

    if(avg):
        # k-NN-smoothed expression; fall back to raw log2 counts when
        # the smoothed values are all zero.
        a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
        if a.sum() == 0:
            a = np.log2(self.adata_raw.X[np.in1d(
                all_cell_names, cell_names), :][:,
                idx].toarray().flatten() + 1)
    else:
        # Raw log2(counts + 1), restricted to the filtered cells.
        a = np.log2(self.adata_raw.X[np.in1d(
            all_cell_names, cell_names), :][:,
            idx].toarray().flatten() + 1)

    if axes is None:
        plt.figure()
        axes = plt.gca()

    self.scatter(c=a, axes=axes, **kwargs)
    axes.set_title(name)
def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
    """Cluster cells with DBSCAN and reassign noise points.

    Parameters
    ----------
    X - numpy.array, optional, default None
        Coordinates to cluster. If None, uses the UMAP embedding and
        stores the labels in adata.obs['density_clusters']; otherwise
        the labels are returned.

    eps - float, optional, default 1
        DBSCAN neighborhood radius.

    metric - string, optional, default 'euclidean'
        Distance metric passed to DBSCAN.
    """
    from sklearn.cluster import DBSCAN
    if X is None:
        X = self.adata.obsm['X_umap']
        save = True
    else:
        save = False

    cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)

    # DBSCAN labels noise as -1; reassign each noise point to the
    # majority cluster among its self.k nearest clustered neighbors.
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # Count, per noise point, how many neighbors fall in each cluster.
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['density_clusters'] = pd.Categorical(cl)
    else:
        return cl
def louvain_clustering(self, X=None, res=1, method='modularity'):
    """Runs Louvain clustering using the vtraag implementation. Assumes
    that 'louvain' optional dependency is installed.

    Parameters
    ----------
    X - scipy.sparse matrix, optional, default None
        Adjacency/neighbor matrix to cluster. If None, uses the stored
        neighbor graph and writes labels to
        adata.obs['louvain_clusters']; otherwise the labels are
        returned.

    res - float, optional, default 1
        The resolution parameter which tunes the number of clusters Louvain
        finds.

    method - str, optional, default 'modularity'
        Can be 'modularity' or 'significance', which are two different
        optimizing funcitons in the Louvain algorithm.
    """
    if X is None:
        X = self.adata.uns['neighbors']['connectivities']
        save = True
    else:
        if not sp.isspmatrix_csr(X):
            X = sp.csr_matrix(X)
        save = False

    import igraph as ig
    import louvain

    # Shared-neighbor weighting: edges of X.dot(X.T)/k, truncated to the
    # top k entries per row by sparse_knn.
    adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
    sources, targets = adjacency.nonzero()
    weights = adjacency[sources, targets]
    if isinstance(weights, np.matrix):
        weights = weights.A1
    g = ig.Graph(directed=True)
    g.add_vertices(adjacency.shape[0])
    g.add_edges(list(zip(sources, targets)))
    try:
        g.es['weight'] = weights
    except BaseException:
        # Best-effort: fall back to an unweighted graph.
        pass

    if method == 'significance':
        cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
    else:
        cl = louvain.find_partition(
            g,
            louvain.RBConfigurationVertexPartition,
            resolution_parameter=res)

    if save:
        self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
    else:
        return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
    """Performs k-means clustering.

    Parameters
    ----------
    numc - int
        Number of clusters

    X - numpy.array, optional, default None
        Coordinates to cluster. If None, projects the processed
        expression data onto the top 'npcs' stored principal components
        and writes the labels to adata.obs['kmeans_clusters']; otherwise
        the labels are returned.

    npcs - int, optional, default 15
        Number of principal components to use as input for k-means
        clustering.
    """
    from sklearn.cluster import KMeans
    if X is None:
        D_sub = self.adata.uns['X_processed']
        # Project mean-centered processed data onto the stored PCA basis.
        X = (
            D_sub -
            D_sub.mean(0)).dot(
            self.adata.uns['pca_obj'].components_[
                :npcs,
                :].T)
        save = True
    else:
        save = False

    # Rows are L2-normalized before clustering.
    cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))

    if save:
        self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
    else:
        return cl
def leiden_clustering(self, X=None, res = 1):
    """Runs Leiden clustering via scanpy.

    Parameters
    ----------
    X - sparse matrix, optional, default None
        Adjacency matrix to cluster. If None, clusters the neighbor
        graph stored in adata and writes 'leiden_clusters' to adata.obs;
        otherwise writes 'leiden_clusters_X' instead.

    res - float, optional, default 1
        Leiden resolution parameter.
    """
    import scanpy.api as sc
    if X is None:
        sc.tl.leiden(self.adata, resolution = res,
                     key_added='leiden_clusters')
        # Convert scanpy's string categories to integer categoricals.
        self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
            'leiden_clusters'].get_values().astype('int'))
    else:
        sc.tl.leiden(self.adata, resolution = res, adjacency = X,
                     key_added='leiden_clusters_X')
        self.adata.obs['leiden_clusters_X'] = pd.Categorical(self.adata.obs[
            'leiden_clusters_X'].get_values().astype('int'))
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    """Clusters cells with HDBSCAN, then reassigns unclustered (noise)
    cells to the majority cluster among their k nearest neighbors.

    Parameters
    ----------
    X - numpy.array, optional, default None
        Coordinates to cluster. If None, uses the first 15 PCs of the
        processed expression data (row-normalized) and stores the labels
        in adata.obs['hdbknn_clusters']; otherwise the labels are
        returned.

    k - int, optional, default None
        Number of neighbors used when reassigning noise points.
        Defaults to 20.
    """
    import hdbscan
    if X is None:
        #X = self.adata.obsm['X_pca']
        D = self.adata.uns['X_processed']
        X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False

    if k is None:
        k = 20#self.k

    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)

    cl = hdb.fit_predict(X)

    # HDBSCAN labels noise as -1; vote among each noise point's k
    # nearest clustered neighbors to assign it a cluster.
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
                             n_genes=4000):
    """
    Ranks marker genes for each cluster using a random forest
    classification approach.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    clusters - int or array-like, default None
        A number or vector corresponding to the specific cluster ID(s)
        for which marker genes will be calculated. If None, marker genes
        will be computed for all clusters.

    n_genes - int, optional, default 4000
        By default, trains the classifier on the top 4000 SAM-weighted
        genes.
    """
    if(labels is None):
        try:
            # Use the first '*_clusters' column found in adata.obs.
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    from sklearn.ensemble import RandomForestClassifier

    markers = {}
    # Fix: 'clusters == None' broadcasts elementwise when 'clusters' is
    # an array, making the 'if' raise ValueError (and it was already
    # inconsistent with the 'is None' check at the bottom of this
    # method). Use an identity check.
    if clusters is None:
        lblsu = np.unique(lbls)
    else:
        lblsu = np.unique(clusters)

    indices = np.argsort(-self.adata.var['weights'].values)
    X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
    for K in range(lblsu.size):
        print(K)
        # One-vs-rest target; feature importances rank genes for this
        # cluster.
        y = np.zeros(lbls.size)
        y[lbls == lblsu[K]] = 1

        clf = RandomForestClassifier(n_estimators=100, max_depth=None,
                                     random_state=0)

        clf.fit(X, y)

        idx = np.argsort(-clf.feature_importances_)

        markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]

    if clusters is None:
        self.adata.uns['marker_genes_rf'] = markers

    return markers
def identify_marker_genes_ratio(self, labels=None):
    """
    Ranks marker genes for each cluster using a SAM-weighted
    expression-ratio approach (works quite well).

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    Returns:
    -------
    markers - dict
        Maps each cluster label to gene names sorted by descending
        marker score (also stored in adata.uns['marker_genes_ratio']).
    """
    if(labels is None):
        try:
            # Use the first '*_clusters' column found in adata.obs.
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    all_gene_names = np.array(list(self.adata.var_names))

    markers={}
    # Total expression of each gene across all cells.
    s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
    lblsu=np.unique(lbls)
    for i in lblsu:
        # In-cluster expression of each gene.
        d = np.array(self.adata.layers['X_disp']
                     [lbls == i, :].sum(0)).flatten()
        rat = np.zeros(d.size)
        # Score = (in-cluster sum)^2 / total sum, scaled by the SAM gene
        # weight; genes never expressed keep score 0.
        rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
            self.adata.var['weights'].values[s > 0]
        x = np.argsort(-rat)
        # Genes sorted by descending marker score for this cluster.
        markers[i] = all_gene_names[x[:]]

    self.adata.uns['marker_genes_ratio'] = markers

    return markers
    def identify_marker_genes_corr(self, labels=None, n_genes=4000):
        """
        Ranking marker genes based on their respective magnitudes in the
        correlation dot products with cluster-specific reference expression
        profiles.

        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.

        n_genes - int, optional, default 4000
            By default, computes correlations on the top 4000 SAM-weighted genes.

        Returns
        -------
        markers - dict
            Maps each cluster label to gene names sorted by decreasing score;
            also stored in adata.uns['marker_genes_corr'].
        """
        # Resolve the label vector: auto-detect a '*_clusters' obs column,
        # look up a named obs column, or take the array as given.
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels

        w=self.adata.var['weights'].values
        s = StandardScaler()
        # Standardize the top-n_genes weighted genes across cells, then
        # rescale each gene by its SAM weight.
        idxg = np.argsort(-w)[:n_genes]
        y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
        all_gene_names = np.array(list(self.adata.var_names))[idxg]
        markers = {}
        lblsu=np.unique(lbls)
        for i in lblsu:
            Gcells = np.array(list(self.adata.obs_names[lbls==i]))
            z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
            # z-score each cell's profile across genes, and z-score the
            # cluster-mean reference profile across genes.
            m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
            ref = z1.mean(0)
            ref = (ref-ref.mean())/ref.std()
            # Per-gene score: product of cell z-scores with the reference
            # profile, averaged over the cluster's cells.
            g2 = (m1*ref).mean(0)
            markers[i] = all_gene_names[np.argsort(-g2)]
        self.adata.uns['marker_genes_corr'] = markers
        return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.run | python | def run(self,
max_iter=10,
verbose=True,
projection='umap',
stopping_condition=5e-3,
num_norm_avg=50,
k=20,
distance='correlation',
preprocessing='Normalizer',
proj_kwargs={}):
self.distance = distance
D = self.adata.X
self.k = k
if(self.k < 5):
self.k = 5
elif(self.k > 100):
self.k = 100
if(self.k > D.shape[0] - 1):
self.k = D.shape[0] - 2
numcells = D.shape[0]
n_genes = 8000
if numcells > 3000 and n_genes > 3000:
n_genes = 3000
elif numcells > 2000 and n_genes > 4500:
n_genes = 4500
elif numcells > 1000 and n_genes > 6000:
n_genes = 6000
elif n_genes > 8000:
n_genes = 8000
npcs = None
if npcs is None and numcells > 3000:
npcs = 150
elif npcs is None and numcells > 2000:
npcs = 250
elif npcs is None and numcells > 1000:
npcs = 350
elif npcs is None:
npcs = 500
tinit = time.time()
edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
nums = np.arange(edm.shape[1])
RINDS = np.random.randint(
0, numcells, (self.k - 1) * numcells).reshape((numcells,
(self.k - 1)))
RINDS = np.hstack((nums[:, None], RINDS))
edm[np.tile(np.arange(RINDS.shape[0])[:, None],
(1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
edm = edm.tocsr()
print('RUNNING SAM')
W = self.dispersion_ranking_NN(
edm, num_norm_avg=1)
old = np.zeros(W.size)
new = W
i = 0
err = ((new - old)**2).mean()**0.5
if max_iter < 5:
max_iter = 5
nnas = num_norm_avg
self.Ns=[edm]
self.Ws = [W]
while (i < max_iter and err > stopping_condition):
conv = err
if(verbose):
print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))
i += 1
old = new
W, wPCA_data, EDM, = self.calculate_nnm(
D, W, n_genes, preprocessing, npcs, numcells, nnas)
new = W
err = ((new - old)**2).mean()**0.5
self.Ns.append(EDM)
self.Ws.append(W)
W, wPCA_data, EDM, = self.calculate_nnm(
D, W, n_genes, preprocessing, npcs, numcells, nnas)
self.Ns.append(EDM)
all_gene_names = np.array(list(self.adata.var_names))
indices = np.argsort(-W)
ranked_genes = all_gene_names[indices]
self.corr_bin_genes(number_of_features=1000)
self.adata.uns['ranked_genes'] = ranked_genes
self.adata.obsm['X_pca'] = wPCA_data
self.adata.uns['neighbors'] = {}
self.adata.uns['neighbors']['connectivities'] = EDM
if(projection == 'tsne'):
print('Computing the t-SNE embedding...')
self.run_tsne(**proj_kwargs)
elif(projection == 'umap'):
print('Computing the UMAP embedding...')
self.run_umap(**proj_kwargs)
elif(projection == 'diff_umap'):
print('Computing the diffusion UMAP embedding...')
self.run_diff_umap(**proj_kwargs)
elapsed = time.time() - tinit
if verbose:
print('Elapsed time: ' + str(elapsed) + ' seconds') | Runs the Self-Assembling Manifold algorithm.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when constructing cell distance
matrices. Can be any of the distance metrics supported by
sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
stopping_condition - float, optional, default 5e-3
The stopping condition threshold for the RMSE between gene weights
in adjacent iterations.
verbose - bool, optional, default True
If True, the iteration number and error between gene weights in
adjacent iterations will be displayed.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
preprocessing - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets and
'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
proj_kwargs - dict, optional, default {}
A dictionary of keyword arguments to pass to the projection
functions. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L612-L779 | [
"def dispersion_ranking_NN(self, nnm, num_norm_avg=50):\n \"\"\"Computes the spatial dispersion factors for each gene.\n\n Parameters\n ----------\n nnm - scipy.sparse, float\n Square cell-to-cell nearest-neighbor matrix.\n\n num_norm_avg - int, optional, default 50\n The top 'num_norm_... | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
if isinstance(counts, tuple) or isinstance(counts, list):
raw_data, all_gene_names, all_cell_names = counts
if isinstance(raw_data, np.ndarray):
raw_data = sp.csr_matrix(raw_data)
self.adata_raw = AnnData(
X=raw_data, obs={
'obs_names': all_cell_names}, var={
'var_names': all_gene_names})
elif isinstance(counts, pd.DataFrame):
raw_data = sp.csr_matrix(counts.values)
all_gene_names = np.array(list(counts.columns.values))
all_cell_names = np.array(list(counts.index.values))
self.adata_raw = AnnData(
X=raw_data, obs={
'obs_names': all_cell_names}, var={
'var_names': all_gene_names})
elif isinstance(counts, AnnData):
all_cell_names=np.array(list(counts.obs_names))
all_gene_names=np.array(list(counts.var_names))
self.adata_raw = counts
elif counts is not None:
raise Exception(
"\'counts\' must be either a tuple/list of "
"(data,gene IDs,cell IDs) or a Pandas DataFrame of"
"cells x genes")
if(annotations is not None):
annotations = np.array(list(annotations))
if counts is not None:
self.adata_raw.obs['annotations'] = pd.Categorical(annotations)
if(counts is not None):
if(np.unique(all_gene_names).size != all_gene_names.size):
self.adata_raw.var_names_make_unique()
if(np.unique(all_cell_names).size != all_cell_names.size):
self.adata_raw.obs_names_make_unique()
self.adata = self.adata_raw.copy()
self.adata.layers['X_disp'] = self.adata.X
    def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                        include_genes=None, exclude_genes=None,
                        include_cells=None, exclude_cells=None,
                        norm='log', min_expression=1, thresh=0.01,
                        filter_genes=True):
        """Log-normalizes and filters the expression data.

        Parameters
        ----------
        div : float, optional, default 1
            The factor by which the gene expression will be divided prior to
            log normalization.
        downsample : float, optional, default 0
            The factor by which to randomly downsample the data. If 0, the
            data will not be downsampled.
        sum_norm : str or float, optional, default None
            If a float, the total number of transcripts in each cell will be
            normalized to this value prior to normalization and filtering.
            Otherwise, nothing happens. If 'cell_median', each cell is
            normalized to have the median total read count per cell. If
            'gene_median', each gene is normalized to have the median total
            read count per gene.
        norm : str, optional, default 'log'
            If 'log', log-normalizes the expression data. If 'ftt', applies the
            Freeman-Tukey variance-stabilization transformation. If
            'multinomial', applies the Pearson-residual transformation (this is
            experimental and should only be used for raw, un-normalized UMI
            datasets). If None, the data is not normalized.
        include_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to keep.
            All other genes will be filtered out. Gene names are case-
            sensitive.
        exclude_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to
            exclude. These genes will be filtered out. Gene names are case-
            sensitive.
        include_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to keep.
            All other cells will be filtered out. Cell names are
            case-sensitive.
        exclude_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to exclude.
            These cells will be filtered out. Cell names are
            case-sensitive.
        min_expression : float, optional, default 1
            The threshold above which a gene is considered
            expressed. Gene expression values less than 'min_expression' are
            set to zero.
        thresh : float, optional, default 0.01
            Keep genes expressed in greater than 'thresh'*100 % of cells and
            less than (1-'thresh')*100 % of cells, where a gene is considered
            expressed if its expression value exceeds 'min_expression'.
        filter_genes : bool, optional, default True
            Setting this to False turns off filtering operations aside from
            removing genes with zero expression across all cells. Genes passed
            in exclude_genes or not passed in include_genes will still be
            filtered.
        """
        # load data
        try:
            D= self.adata_raw.X
            self.adata = self.adata_raw.copy()
        except AttributeError:
            print('No data loaded')
        # filter cells
        cell_names = np.array(list(self.adata_raw.obs_names))
        idx_cells = np.arange(D.shape[0])
        if(include_cells is not None):
            include_cells = np.array(list(include_cells))
            idx2 = np.where(np.in1d(cell_names, include_cells))[0]
            idx_cells = np.array(list(set(idx2) & set(idx_cells)))
        if(exclude_cells is not None):
            exclude_cells = np.array(list(exclude_cells))
            idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                    invert=True))[0]
            idx_cells = np.array(list(set(idx4) & set(idx_cells)))
        if downsample > 0:
            numcells = int(D.shape[0] / downsample)
            rand_ind = np.random.choice(np.arange(D.shape[0]),
                                        size=numcells, replace=False)
            idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
        else:
            numcells = D.shape[0]
        mask_cells = np.zeros(D.shape[0], dtype='bool')
        mask_cells[idx_cells] = True
        self.adata = self.adata_raw[mask_cells,:].copy()
        D = self.adata.X
        # Work on a float32 csr matrix regardless of input representation.
        if isinstance(D,np.ndarray):
            D=sp.csr_matrix(D,dtype='float32')
        else:
            D=D.astype('float32')
            D.sort_indices()
        if(D.getformat() == 'csc'):
            D=D.tocsr();
        # sum-normalize
        if (sum_norm == 'cell_median' and norm != 'multinomial'):
            s = D.sum(1).A.flatten()
            sum_norm = np.median(s)
            D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
        elif (sum_norm == 'gene_median' and norm != 'multinomial'):
            s = D.sum(0).A.flatten()
            sum_norm = np.median(s)
            # avoid division by zero for all-zero genes
            s[s==0]=1
            D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
        elif sum_norm is not None and norm != 'multinomial':
            D = D.multiply(1 / D.sum(1).A.flatten()[:,
                                                    None] * sum_norm).tocsr()
        # normalize
        self.adata.X = D
        if norm is None:
            D.data[:] = (D.data / div)
        elif(norm.lower() == 'log'):
            D.data[:] = np.log2(D.data / div + 1)
        elif(norm.lower() == 'ftt'):
            # Freeman-Tukey: sqrt(x) + sqrt(x+1)
            D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
        elif norm.lower() == 'multinomial':
            ni = D.sum(1).A.flatten() #cells
            pj = (D.sum(0) / D.sum()).A.flatten() #genes
            col = D.indices
            # Reconstruct the row index for every stored nonzero from indptr.
            row=[]
            for i in range(D.shape[0]):
                row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
            row = np.concatenate(row).astype('int32')
            # Pearson residuals: (x - mu) / sqrt(mu - mu^2/n) with mu = n_i*p_j.
            mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
            mu2 = mu.copy()
            mu2.data[:]=mu2.data**2
            mu2 = mu2.multiply(1/ni[:,None])
            mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
            self.adata.X = mu
            if sum_norm is None:
                sum_norm = np.median(ni)
            # D becomes the log-normalized companion layer (used for X_disp).
            D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
            D.data[:] = np.log2(D.data / div + 1)
        else:
            D.data[:] = (D.data / div)
        # zero-out low-expressed genes
        idx = np.where(D.data <= min_expression)[0]
        D.data[idx] = 0
        # filter genes
        gene_names = np.array(list(self.adata.var_names))
        idx_genes = np.arange(D.shape[1])
        if(include_genes is not None):
            include_genes = np.array(list(include_genes))
            idx = np.where(np.in1d(gene_names, include_genes))[0]
            idx_genes = np.array(list(set(idx) & set(idx_genes)))
        if(exclude_genes is not None):
            exclude_genes = np.array(list(exclude_genes))
            idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                    invert=True))[0]
            idx_genes = np.array(list(set(idx3) & set(idx_genes)))
        if(filter_genes):
            # Count cells expressing each gene, then keep genes expressed in
            # (thresh, 1-thresh] of cells.
            a, ct = np.unique(D.indices, return_counts=True)
            c = np.zeros(D.shape[1])
            c[a] = ct
            keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                           c / D.shape[0] <= 1 - thresh))[0]
            idx_genes = np.array(list(set(keep) & set(idx_genes)))
        # Filtered genes are zeroed out rather than dropped, so matrix shape
        # is preserved; the mask is recorded in var['mask_genes'].
        mask_genes = np.zeros(D.shape[1], dtype='bool')
        mask_genes[idx_genes] = True
        self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
        self.adata.X.eliminate_zeros()
        self.adata.var['mask_genes']=mask_genes
        if norm == 'multinomial':
            self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
            self.adata.layers['X_disp'].eliminate_zeros()
        else:
            self.adata.layers['X_disp'] = self.adata.X
    def load_data(self, filename, transpose=True,
                  save_sparse_file='h5ad', sep=',', **kwargs):
        """Loads the specified data file. The file can be a table of
        read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
        as columns by default. The file can also be a pickle file (output from
        'save_sparse_data') or an h5ad file (output from 'save_anndata').

        This function that loads the file specified by 'filename'.

        Parameters
        ----------
        filename - string
            The path to the tabular raw expression counts file.

        sep - string, optional, default ','
            The delimeter used to read the input data table. By default
            assumes the input table is delimited by commas.

        save_sparse_file - str, optional, default 'h5ad'
            If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
            (the native AnnData file format) to the same folder as the original
            data for faster loading in the future. If 'p', pickles the sparse
            data structure, cell names, and gene names in the same folder as
            the original data for faster loading in the future.

        transpose - bool, optional, default True
            By default, assumes file is (genes x cells). Set this to False if
            the file has dimensions (cells x genes).
        """
        # Dispatch on file extension: '.p' pickle, '.h5ad', or delimited text.
        if filename.split('.')[-1] == 'p':
            raw_data, all_cell_names, all_gene_names = (
                pickle.load(open(filename, 'rb')))

            if(transpose):
                raw_data = raw_data.T
                if raw_data.getformat()=='csc':
                    print("Converting sparse matrix to csr format...")
                    raw_data=raw_data.tocsr()

            # Already a fast-loading format; do not re-save it below.
            save_sparse_file = None
        elif filename.split('.')[-1] != 'h5ad':
            df = pd.read_csv(filename, sep=sep, index_col=0)
            if(transpose):
                dataset = df.T
            else:
                dataset = df
            raw_data = sp.csr_matrix(dataset.values)
            all_cell_names = np.array(list(dataset.index.values))
            all_gene_names = np.array(list(dataset.columns.values))

        if filename.split('.')[-1] != 'h5ad':
            self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                     var={'var_names': all_gene_names})
            # De-duplicate IDs only when duplicates are actually present.
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()

            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = raw_data

        else:
            self.adata_raw = anndata.read_h5ad(filename, **kwargs)
            self.adata = self.adata_raw.copy()
            if 'X_disp' not in list(self.adata.layers.keys()):
                self.adata.layers['X_disp'] = self.adata.X
            save_sparse_file = None

        # Optionally cache a fast-loading copy next to the original file.
        if(save_sparse_file == 'p'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_sparse_data(path + new_sparse_file + '_sparse.p')
        elif(save_sparse_file == 'h5ad'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
    def load_annotations(self, aname, sep=','):
        """Loads cell annotations.

        Loads the cell annoations specified by the 'aname' path.

        Parameters
        ----------
        aname - string
            The path to the annotations file. First column should be cell IDs
            and second column should be the desired annotations.
        """
        # First read sniffs the file's shape; the file is then re-read with
        # different header/index assumptions until the row count matches the
        # number of cells in the raw data.
        ann = pd.read_csv(aname)

        cell_names = np.array(list(self.adata.obs_names))
        all_cell_names = np.array(list(self.adata_raw.obs_names))

        if(ann.shape[1] > 1):
            ann = pd.read_csv(aname, index_col=0, sep=sep)
            if(ann.shape[0] != all_cell_names.size):
                # Row count mismatch: assume there was no header row.
                ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
        else:
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, header=None, sep=sep)

        # Coerce the index to strings so it can be matched against obs_names.
        ann.index = np.array(list(ann.index.astype('<U100')))
        # ann1: annotations reordered to the filtered cells (adata);
        # ann2: annotations for all raw cells in file order (adata_raw).
        ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
        ann2 = np.array(list(ann.values.flatten()))
        self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
        self.adata.obs['annotations'] = pd.Categorical(ann1)
    def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
        """Computes the spatial dispersion factors for each gene.

        Parameters
        ----------
        nnm - scipy.sparse, float
            Square cell-to-cell nearest-neighbor matrix.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This ensures
            that outlier genes do not significantly skew the weight
            distribution.

        Returns:
        -------
        weights - ndarray, float
            The vector of gene weights (also stored in adata.var['weights']).
        """
        # Average expression over each cell's neighborhood; stored in the
        # 'X_knn_avg' layer by knn_avg.
        self.knn_avg(nnm)

        D_avg = self.adata.layers['X_knn_avg']

        # Per-gene Fano factor (variance/mean) of the neighbor-averaged data.
        mu, var = sf.mean_variance_axis(D_avg, axis=0)

        dispersions = np.zeros(var.size)
        dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
        self.adata.var['spatial_dispersions'] = dispersions.copy()

        # Clip at the mean of the top num_norm_avg dispersions so extreme
        # genes cannot dominate, then normalize to [0, 1] and square-root.
        ma = np.sort(dispersions)[-num_norm_avg:].mean()
        dispersions[dispersions >= ma] = ma

        weights = ((dispersions / dispersions.max())**0.5).flatten()

        self.adata.var['weights'] = weights

        return weights
    def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
        """Computes the contribution of the gene IDs in 'genes' to each
        principal component (PC) of the filtered expression data as the mean of
        the absolute value of the corresponding gene loadings. High values
        correspond to PCs that are highly correlated with the features in
        'genes'. These PCs can then be regressed out of the data using
        'regress_genes'.

        Parameters
        ----------
        genes - numpy.array or list
            Genes for which contribution to each PC will be calculated.

        npcs - int, optional, default None
            How many PCs to calculate when computing PCA of the filtered and
            log-transformed expression data. If None, calculate all PCs.

        plot - bool, optional, default False
            If True, plot the scores reflecting how correlated each PC is with
            genes of interest. Otherwise, do not plot anything.

        Returns:
        -------
        x - numpy.array
            Scores reflecting how correlated each PC is with the genes of
            interest (ordered by decreasing eigenvalues).
        """
        from sklearn.decomposition import PCA
        if npcs is None:
            npcs = self.adata.X.shape[0]

        pca = PCA(n_components=npcs)
        pc = pca.fit_transform(self.adata.X.toarray())

        # Stored for later use by regress_genes.
        self.regression_pca = pca
        self.regression_pcs = pc

        gene_names = np.array(list(self.adata.var_names))
        if(genes is not None):
            idx = np.where(np.in1d(gene_names, genes))[0]
            sx = pca.components_[:, idx]
            # Score per PC: mean absolute loading over the genes of interest.
            x = np.abs(sx).mean(1)

            if plot:
                plt.figure()
                plt.plot(x)

            return x
        else:
            return
    def regress_genes(self, PCs):
        """Regress out the principal components in 'PCs' from the filtered
        expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
        been previously called.

        Parameters
        ----------
        PCs - int, numpy.array, list
            The principal components to regress out of the expression data.
        """
        # Normalize the input to a flat index array (accepts scalar or list).
        ind = [PCs]
        ind = np.array(ind).flatten()
        # NOTE(review): BaseException is very broad — presumably this guards
        # the case where adata.var['weights'] has not been computed yet, in
        # which case the unweighted reconstruction is subtracted instead;
        # verify, and consider narrowing to KeyError.
        try:
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :] * self.adata.var[
                    'weights'].values)
        except BaseException:
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :])

        self.adata.X = sp.csr_matrix(y)
    def run(self,
            max_iter=10,
            verbose=True,
            projection='umap',
            stopping_condition=5e-3,
            num_norm_avg=50,
            k=20,
            distance='correlation',
            preprocessing='Normalizer',
            proj_kwargs={}):
        """Runs the Self-Assembling Manifold algorithm.

        Parameters
        ----------
        k - int, optional, default 20
            The number of nearest neighbors to identify for each cell.

        distance : string, optional, default 'correlation'
            The distance metric to use when constructing cell distance
            matrices. Can be any of the distance metrics supported by
            sklearn's 'pdist'.

        max_iter - int, optional, default 10
            The maximum number of iterations SAM will run.

        stopping_condition - float, optional, default 5e-3
            The stopping condition threshold for the RMSE between gene weights
            in adjacent iterations.

        verbose - bool, optional, default True
            If True, the iteration number and error between gene weights in
            adjacent iterations will be displayed.

        projection - str, optional, default 'umap'
            If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
            embedding. Otherwise, no embedding will be generated.

        preprocessing - str, optional, default 'Normalizer'
            If 'Normalizer', use sklearn.preprocessing.Normalizer, which
            normalizes expression data prior to PCA such that each cell has
            unit L2 norm. If 'StandardScaler', use
            sklearn.preprocessing.StandardScaler, which normalizes expression
            data prior to PCA such that each gene has zero mean and unit
            variance. Otherwise, do not normalize the expression data. We
            recommend using 'StandardScaler' for large datasets and
            'Normalizer' otherwise.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This prevents
            genes with large spatial dispersions from skewing the distribution
            of weights.

        proj_kwargs - dict, optional, default {}
            A dictionary of keyword arguments to pass to the projection
            functions.
        """
        self.distance = distance
        D = self.adata.X
        # Clamp k to [5, 100] and to at most numcells - 2.
        self.k = k
        if(self.k < 5):
            self.k = 5
        elif(self.k > 100):
            self.k = 100

        if(self.k > D.shape[0] - 1):
            self.k = D.shape[0] - 2

        numcells = D.shape[0]

        # Cap the number of genes kept for PCA by dataset size.
        n_genes = 8000
        if numcells > 3000 and n_genes > 3000:
            n_genes = 3000
        elif numcells > 2000 and n_genes > 4500:
            n_genes = 4500
        elif numcells > 1000 and n_genes > 6000:
            n_genes = 6000
        elif n_genes > 8000:
            n_genes = 8000

        # Number of principal components, also chosen by dataset size.
        npcs = None
        if npcs is None and numcells > 3000:
            npcs = 150
        elif npcs is None and numcells > 2000:
            npcs = 250
        elif npcs is None and numcells > 1000:
            npcs = 350
        elif npcs is None:
            npcs = 500

        tinit = time.time()

        # Bootstrap with a random kNN graph: each cell linked to itself plus
        # k-1 randomly chosen cells.
        edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        nums = np.arange(edm.shape[1])
        RINDS = np.random.randint(
            0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                           (self.k - 1)))
        RINDS = np.hstack((nums[:, None], RINDS))

        edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                    (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
        edm = edm.tocsr()

        print('RUNNING SAM')

        # Initial weights from the random graph (no outlier clipping yet).
        W = self.dispersion_ranking_NN(
            edm, num_norm_avg=1)

        old = np.zeros(W.size)
        new = W

        i = 0
        err = ((new - old)**2).mean()**0.5

        if max_iter < 5:
            max_iter = 5

        nnas = num_norm_avg

        # Keep per-iteration graphs and weights for inspection.
        self.Ns=[edm]
        self.Ws = [W]

        # Iterate weights -> PCA -> kNN graph -> weights until the RMSE
        # between successive weight vectors drops below the threshold.
        while (i < max_iter and err > stopping_condition):
            conv = err
            if(verbose):
                print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))

            i += 1
            old = new

            W, wPCA_data, EDM, = self.calculate_nnm(
                D, W, n_genes, preprocessing, npcs, numcells, nnas)
            new = W
            err = ((new - old)**2).mean()**0.5

            self.Ns.append(EDM)
            self.Ws.append(W)

        # One final pass with the converged weights.
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        self.Ns.append(EDM)

        all_gene_names = np.array(list(self.adata.var_names))
        indices = np.argsort(-W)
        ranked_genes = all_gene_names[indices]

        self.corr_bin_genes(number_of_features=1000)

        self.adata.uns['ranked_genes'] = ranked_genes

        self.adata.obsm['X_pca'] = wPCA_data

        self.adata.uns['neighbors'] = {}
        self.adata.uns['neighbors']['connectivities'] = EDM

        if(projection == 'tsne'):
            print('Computing the t-SNE embedding...')
            self.run_tsne(**proj_kwargs)
        elif(projection == 'umap'):
            print('Computing the UMAP embedding...')
            self.run_umap(**proj_kwargs)
        elif(projection == 'diff_umap'):
            print('Computing the diffusion UMAP embedding...')
            self.run_diff_umap(**proj_kwargs)

        elapsed = time.time() - tinit

        if verbose:
            print('Elapsed time: ' + str(elapsed) + ' seconds')
    def calculate_nnm(
            self,
            D,
            W,
            n_genes,
            preprocessing,
            npcs,
            numcells,
            num_norm_avg):
        """One SAM iteration: weight the data, run PCA, rebuild the kNN graph,
        and recompute gene weights from the new graph.

        Parameters
        ----------
        D - scipy.sparse
            Expression matrix (cells x genes).
        W - numpy.ndarray
            Current gene weights.
        n_genes - int or None
            Number of top-weighted genes to keep (all genes if None).
        preprocessing - str
            'Normalizer', 'StandardScaler', or anything else for no scaling.
        npcs - int
            Number of principal components.
        numcells - int
            Number of cells (selects the PCA solver).
        num_norm_avg - int
            Passed through to dispersion_ranking_NN.

        Returns
        -------
        (W, g_weighted, EDM) - updated weights, weighted PCA coordinates,
        and the new nearest-neighbor matrix.
        """
        # Restrict to the top-n_genes weighted genes (sorted index order).
        if(n_genes is None):
            gkeep = np.arange(W.size)
        else:
            gkeep = np.sort(np.argsort(-W)[:n_genes])

        if preprocessing == 'Normalizer':
            Ds = D[:, gkeep].toarray()
            Ds = Normalizer().fit_transform(Ds)

        elif preprocessing == 'StandardScaler':
            Ds = D[:, gkeep].toarray()
            Ds = StandardScaler(with_mean=True).fit_transform(Ds)
            # Clip extreme z-scores.
            Ds[Ds > 5] = 5
            Ds[Ds < -5] = -5

        else:
            Ds = D[:, gkeep].toarray()

        # Rescale each kept gene by its SAM weight before PCA.
        D_sub = Ds * (W[gkeep])

        if numcells > 500:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='auto')
        else:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='full')

        if self.distance == 'euclidean':
            # Unit-normalize PCA coordinates for euclidean distances.
            g_weighted = Normalizer().fit_transform(g_weighted)

        self.adata.uns['pca_obj'] = pca
        EDM = self.calc_nnm(g_weighted)

        W = self.dispersion_ranking_NN(
            EDM, num_norm_avg=num_norm_avg)

        self.adata.uns['X_processed'] = D_sub

        return W, g_weighted, EDM
def calc_nnm(self,g_weighted):
numcells=g_weighted.shape[0]
if g_weighted.shape[0] > 8000:
nnm, dists = ut.nearest_neighbors(
g_weighted, n_neighbors=self.k, metric=self.distance)
EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
EDM[np.tile(np.arange(nnm.shape[0])[:, None],
(1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
EDM = EDM.tocsr()
else:
dist = ut.compute_distances(g_weighted, self.distance)
nnm = ut.dist_to_nn(dist, self.k)
EDM = sp.csr_matrix(nnm)
return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
"""Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
geneID_groups = self.adata.uns['gene_groups']
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs)
def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
    """A (hacky) method for binning groups of genes correlated along the
    SAM manifold.

    Parameters
    ----------
    number_of_features - int, optional, default None
        The number of genes to bin. Capped at 1000 due to memory
        considerations (docstring previously said 5000; the code caps
        at 1000).

    input_gene - str, optional, default None
        If not None, use this gene as the first seed when growing the
        correlation bins; only the seed gene's bin is returned and
        ``adata.uns['gene_groups']`` is not written. Otherwise all bins
        are returned and stored in ``adata.uns['gene_groups']``.
    """
    weights = self.adata.var['spatial_dispersions'].values
    all_gene_names = np.array(list(self.adata.var_names))
    D_avg = self.adata.layers['X_knn_avg']

    # Rank genes by decreasing spatial dispersion; zero-dispersion genes
    # are dropped entirely.
    idx2 = np.argsort(-weights)[:weights[weights > 0].size]

    if(number_of_features is None or number_of_features > idx2.size):
        number_of_features = idx2.size

    if number_of_features > 1000:
        number_of_features = 1000

    if(input_gene is not None):
        input_gene = np.where(all_gene_names == input_gene)[0]
        if(input_gene.size == 0):
            # Fixed message typo ("note found" -> "not found") to match
            # the wording used elsewhere in this class.
            print(
                "Gene not found in the filtered dataset. Note "
                "that genes are case sensitive.")
            return
        seeds = [np.array([input_gene])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        # Greedily assign each gene to the seed bin it is most correlated
        # with, provided the correlation beats the gene's mean positive
        # correlation; otherwise the gene starts a new bin.
        # NOTE(review): if the seed gene falls outside idx2[:number_of_features],
        # the np.where lookup below is empty and the comparison is vacuously
        # False -- confirm intended behavior.
        for i in range(1, number_of_features):
            flag = False
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                   > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])

        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(all_gene_names[seeds[i]])

        return geneID_groups[0]
    else:
        # Same greedy binning as above, but seeded with the top-dispersion
        # gene; all bins are kept and cached on the AnnData object.
        seeds = [np.array([idx2[0]])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                   > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])

        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(
                all_gene_names[seeds[i]])

        self.adata.uns['gene_groups'] = geneID_groups
        return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
    """Wrapper for sklearn's t-SNE implementation.

    See sklearn for the t-SNE documentation. All arguments are the same
    with the exception that 'metric' is set to 'precomputed' by default,
    implying that this function expects a distance matrix by default.
    """
    if X is not None:
        # Explicit input: embed it and hand the result back to the caller.
        return man.TSNE(metric=metric, **kwargs).fit_transform(X)
    # Default: embed the stored PCA coordinates with SAM's own metric and
    # cache the 2D embedding on the AnnData object.
    embedding = man.TSNE(metric=self.distance,
                         **kwargs).fit_transform(self.adata.obsm['X_pca'])
    self.adata.obsm['X_tsne'] = embedding
def run_umap(self, X=None, metric=None, **kwargs):
    """Wrapper for umap-learn.

    See https://github.com/lmcinnes/umap sklearn for the documentation
    and source code.
    """
    import umap as umap

    if metric is None:
        metric = self.distance
    reducer = umap.UMAP(metric=metric, **kwargs)
    if X is not None:
        # Explicit input: embed it and return the coordinates directly.
        return reducer.fit_transform(X)
    # Default: embed the stored PCA coordinates and cache the result.
    self.adata.obsm['X_umap'] = reducer.fit_transform(
        self.adata.obsm['X_pca'])
def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
                  method='gauss', **kwargs):
    """
    Experimental -- running UMAP on the diffusion components

    Builds a kNN graph on `use_rep`, computes `n_comps` diffusion
    components, rebuilds the kNN graph in diffusion space, then runs UMAP
    on that graph. Any pre-existing UMAP embedding is preserved in
    ``adata.obsm['X_umap_sam']`` before being overwritten.

    NOTE(review): the `metric` parameter is currently unused -- the first
    graph uses ``self.distance`` and the second is hard-coded to
    'euclidean'; confirm intended behavior.
    """
    import scanpy.api as sc
    # First neighbor graph on the chosen representation (SAM's metric).
    sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
                metric=self.distance,method=method)

    sc.tl.diffmap(self.adata, n_comps=n_comps)
    # Second neighbor graph in diffusion-component space.
    sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
                metric='euclidean',method=method)

    # Keep the previous UMAP embedding so it is not silently lost.
    if 'X_umap' in self.adata.obsm.keys():
        self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']

    sc.tl.umap(self.adata,min_dist=0.1,copy=False)
def knn_avg(self, nnm=None):
    """Average expression over each cell's k nearest neighbors.

    Stores the smoothed matrix in ``adata.layers['X_knn_avg']``.

    Parameters
    ----------
    nnm - sparse matrix, optional, default None
        Cell-to-cell nearest-neighbor adjacency matrix. If None, the
        connectivities stored by the neighbor-graph computation are used.
    """
    if nnm is None:
        nnm = self.adata.uns['neighbors']['connectivities']
    # Normalize the adjacency by k, then propagate the dispersion-layer
    # expression through the graph.
    averaged = (nnm / self.k).dot(self.adata.layers['X_disp'])
    self.adata.layers['X_knn_avg'] = averaged
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
            edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
    """Display a scatter plot.

    Displays a scatter plot using the SAM projection or another input
    projection with or without annotations.

    Parameters
    ----------
    projection - ndarray of floats, optional, default None
        An N x 2 matrix, where N is the number of data points. If None,
        use an existing SAM projection (default t-SNE). Can take on values
        'umap' or 'tsne' to specify either the SAM UMAP embedding or
        SAM t-SNE embedding.

    c - ndarray or str, optional, default None
        Colors for each cell in the scatter plot. Can be a vector of
        floats or strings for cell annotations. Can also be a key
        for sam.adata.obs (i.e. 'louvain_clusters').

    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.

    cmap - string, optional, default 'rainbow'
        The colormap to use for the input color values.

    colorbar - bool, optional default True
        If True, display a colorbar indicating which values / annotations
        correspond to which color in the scatter plot.

    Keyword arguments -
        All other keyword arguments that can be passed into
        matplotlib.pyplot.scatter can be used.
    """
    # PLOTTING appears to be a module-level flag set when matplotlib
    # imports successfully -- defined outside this block; TODO confirm.
    if (not PLOTTING):
        print("matplotlib not installed!")
    else:
        # Resolve the 2D coordinates: a named stored projection, a raw
        # array, or a default stored projection (UMAP first, then t-SNE).
        if(isinstance(projection, str)):
            try:
                dt = self.adata.obsm[projection]
            except KeyError:
                print('Please create a projection first using run_umap or'
                      'run_tsne')
        elif(projection is None):
            try:
                dt = self.adata.obsm['X_umap']
            except KeyError:
                try:
                    dt = self.adata.obsm['X_tsne']
                except KeyError:
                    print("Please create either a t-SNE or UMAP projection"
                          "first.")
                    return
        else:
            dt = projection

        if(axes is None):
            plt.figure()
            axes = plt.gca()

        if(c is None):
            # No color vector requested: plain scatter.
            plt.scatter(dt[:, 0], dt[:, 1], s=s,
                        linewidth=linewidth, edgecolor=edgecolor, **kwargs)
        else:
            # A string `c` may name a column of adata.obs; if the lookup
            # fails, the string itself is passed through as a color spec.
            if isinstance(c, str):
                try:
                    c = self.adata.obs[c].get_values()
                except KeyError:
                    0  # do nothing

            if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
               (isinstance(c, np.ndarray) or isinstance(c, list))):
                # Categorical string annotations: convert to integer codes
                # for coloring and label the colorbar ticks with the
                # original strings.
                i = ut.convert_annotations(c)
                ui, ai = np.unique(i, return_index=True)

                cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)

                if(colorbar):
                    cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                    cbar.ax.set_yticklabels(c[ai])
            else:
                # Numeric vector (gets a colorbar) or a scalar color spec
                # (colorbar suppressed).
                if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                    colorbar = False
                i = c

                cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)

                if(colorbar):
                    plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
    """Display a gene's expressions.

    Displays a scatter plot using the SAM projection or another input
    projection with a particular gene's expressions overlaid.

    Parameters
    ----------
    gene - string
        a case-sensitive string indicating the gene expression pattern
        to display.

    avg - bool, optional, default True
        If True, the plots use the k-nearest-neighbor-averaged expression
        values to smooth out noisy expression patterns and improves
        visualization.

    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.

    **kwargs - all keyword arguments in 'SAM.scatter' are eligible.
    """
    all_gene_names = np.array(list(self.adata.var_names))
    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))

    idx = np.where(all_gene_names == gene)[0]
    name = gene
    if(idx.size == 0):
        # Fixed message typo ("note found" -> "not found") to match the
        # wording used elsewhere in this class.
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return

    if(avg):
        a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
        if a.sum() == 0:
            # Averaged layer is all zero for this gene; fall back to raw
            # log2 counts restricted to the filtered cells.
            a = np.log2(self.adata_raw.X[np.in1d(
                all_cell_names, cell_names), :][:,
                idx].toarray().flatten() + 1)
    else:
        a = np.log2(self.adata_raw.X[np.in1d(
            all_cell_names, cell_names), :][:,
            idx].toarray().flatten() + 1)

    if axes is None:
        plt.figure()
        axes = plt.gca()
    self.scatter(c=a, axes=axes, **kwargs)
    axes.set_title(name)
def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
    """Cluster cells with DBSCAN, then assign noise points to clusters.

    Parameters
    ----------
    X - numpy.ndarray, optional, default None
        Coordinates to cluster. If None, the stored UMAP embedding is
        used and labels are saved to ``adata.obs['density_clusters']``;
        otherwise the labels are returned.

    eps - float, optional, default 1
        DBSCAN neighborhood radius.

    metric - str, optional, default 'euclidean'
        Distance metric passed to DBSCAN.

    **kwargs -
        Additional keyword arguments forwarded to DBSCAN.
    """
    from sklearn.cluster import DBSCAN
    if X is None:
        X = self.adata.obsm['X_umap']
        save = True
    else:
        save = False

    cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)

    # DBSCAN labels noise points -1; reassign each noise point to the
    # cluster that dominates among its self.k nearest clustered neighbors.
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        # Pairwise distances between clustered and noise points
        # (ut.generate_euclidean_map is a project helper -- defined
        # elsewhere in this module).
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
        # Binary noise-point x clustered-point neighbor indicator matrix.
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # Count neighbors per cluster and take the majority vote.
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['density_clusters'] = pd.Categorical(cl)
    else:
        return cl
def louvain_clustering(self, X=None, res=1, method='modularity'):
    """Runs Louvain clustering using the vtraag implementation. Assumes
    that 'louvain' optional dependency is installed.

    Parameters
    ----------
    X - scipy.sparse matrix, optional, default None
        Adjacency matrix to cluster. If None, the stored neighbor-graph
        connectivities are used and labels are saved to
        ``adata.obs['louvain_clusters']``; otherwise labels are returned.

    res - float, optional, default 1
        The resolution parameter which tunes the number of clusters Louvain
        finds.

    method - str, optional, default 'modularity'
        Can be 'modularity' or 'significance', which are two different
        optimizing functions in the Louvain algorithm.
    """
    if X is None:
        X = self.adata.uns['neighbors']['connectivities']
        save = True
    else:
        if not sp.isspmatrix_csr(X):
            X = sp.csr_matrix(X)
        save = False

    import igraph as ig
    import louvain

    # Sharpen the graph: keep only the top-k entries of X X^T / k
    # (sparse_knn is a module-level helper defined elsewhere in this file).
    adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
    sources, targets = adjacency.nonzero()
    weights = adjacency[sources, targets]
    if isinstance(weights, np.matrix):
        weights = weights.A1
    g = ig.Graph(directed=True)
    g.add_vertices(adjacency.shape[0])
    g.add_edges(list(zip(sources, targets)))
    # Edge weights are best-effort: some igraph builds reject the
    # assignment, in which case the graph is left unweighted.
    try:
        g.es['weight'] = weights
    except BaseException:
        pass

    if method == 'significance':
        cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
    else:
        cl = louvain.find_partition(
            g,
            louvain.RBConfigurationVertexPartition,
            resolution_parameter=res)

    if save:
        self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
    else:
        return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
    """Performs k-means clustering.

    Parameters
    ----------
    numc - int
        Number of clusters

    npcs - int, optional, default 15
        Number of principal components to use as input for k-means
        clustering.
    """
    from sklearn.cluster import KMeans

    save = X is None
    if save:
        # Project the processed data onto the top `npcs` principal
        # components before clustering.
        D_sub = self.adata.uns['X_processed']
        X = (D_sub - D_sub.mean(0)).dot(
            self.adata.uns['pca_obj'].components_[:npcs, :].T)

    labels = KMeans(n_clusters=numc).fit_predict(
        Normalizer().fit_transform(X))

    if save:
        self.adata.obs['kmeans_clusters'] = pd.Categorical(labels)
    else:
        return labels
def leiden_clustering(self, X=None, res=1):
    """Runs Leiden clustering via scanpy.

    Parameters
    ----------
    X - sparse matrix, optional, default None
        Adjacency matrix to cluster. If None, cluster the stored neighbor
        graph and save labels to ``adata.obs['leiden_clusters']``;
        otherwise labels go to ``adata.obs['leiden_clusters_X']``.

    res - float, optional, default 1
        Resolution parameter tuning the number of clusters.
    """
    import scanpy.api as sc

    if X is None:
        key = 'leiden_clusters'
        sc.tl.leiden(self.adata, resolution=res, key_added=key)
    else:
        key = 'leiden_clusters_X'
        sc.tl.leiden(self.adata, resolution=res, adjacency=X,
                     key_added=key)
    # scanpy stores the labels as strings; convert to integer categories.
    self.adata.obs[key] = pd.Categorical(
        self.adata.obs[key].get_values().astype('int'))
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    """Cluster cells with HDBSCAN, then assign noise points to clusters.

    Parameters
    ----------
    X - numpy.ndarray, optional, default None
        Coordinates to cluster. If None, a normalized 15-PC projection of
        the processed data is used and labels are saved to
        ``adata.obs['hdbknn_clusters']``; otherwise labels are returned.

    k - int, optional, default None
        Number of nearest clustered neighbors used when reassigning noise
        points. Defaults to 20.

    **kwargs -
        Additional keyword arguments forwarded to hdbscan.HDBSCAN.
    """
    import hdbscan
    if X is None:
        #X = self.adata.obsm['X_pca']
        # Project processed data onto the first 15 PCs, then L2-normalize
        # each cell.
        D = self.adata.uns['X_processed']
        X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False

    if k is None:
        k = 20#self.k

    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)

    cl = hdb.fit_predict(X)

    # HDBSCAN labels noise points -1; reassign each noise point to the
    # cluster that dominates among its k nearest clustered neighbors
    # (same scheme as density_clustering).
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
                             n_genes=4000):
    """
    Ranks marker genes for each cluster using a random forest
    classification approach.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    clusters - int or array-like, default None
        A number or vector corresponding to the specific cluster ID(s)
        for which marker genes will be calculated. If None, marker genes
        will be computed for all clusters.

    n_genes - int, optional, default 4000
        By default, trains the classifier on the top 4000 SAM-weighted
        genes.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    from sklearn.ensemble import RandomForestClassifier

    markers = {}
    # Use `is None`: `clusters == None` broadcasts elementwise when
    # `clusters` is an array-like (which the docstring allows), making
    # the `if` raise a truth-value-ambiguity error.
    if clusters is None:
        lblsu = np.unique(lbls)
    else:
        lblsu = np.unique(clusters)

    indices = np.argsort(-self.adata.var['weights'].values)
    X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
    for K in range(lblsu.size):
        print(K)
        # One-vs-rest binary target for the current cluster.
        y = np.zeros(lbls.size)
        y[lbls == lblsu[K]] = 1
        clf = RandomForestClassifier(n_estimators=100, max_depth=None,
                                     random_state=0)
        clf.fit(X, y)
        idx = np.argsort(-clf.feature_importances_)
        # assumes adata.uns['ranked_genes'] is ordered by decreasing SAM
        # weight so it aligns with indices[:n_genes] -- TODO confirm
        markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
    if clusters is None:
        self.adata.uns['marker_genes_rf'] = markers
    return markers
def identify_marker_genes_ratio(self, labels=None):
    """
    Ranks marker genes for each cluster using a SAM-weighted
    expression-ratio approach (works quite well).

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    """
    if labels is None:
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    gene_names = np.array(list(self.adata.var_names))
    weights = self.adata.var['weights'].values
    # Total expression of each gene across all cells.
    totals = np.array(self.adata.layers['X_disp'].sum(0)).flatten()

    markers = {}
    for lbl in np.unique(lbls):
        # Within-cluster expression of each gene.
        in_cluster = np.array(self.adata.layers['X_disp']
                              [lbls == lbl, :].sum(0)).flatten()
        # SAM-weighted ratio score: (in-cluster sum)^2 / total, weighted.
        score = np.zeros(in_cluster.size)
        expressed = totals > 0
        score[expressed] = (in_cluster[expressed] ** 2 /
                            totals[expressed] * weights[expressed])
        markers[lbl] = gene_names[np.argsort(-score)]

    self.adata.uns['marker_genes_ratio'] = markers
    return markers
def identify_marker_genes_corr(self, labels=None, n_genes=4000):
    """
    Ranking marker genes based on their respective magnitudes in the
    correlation dot products with cluster-specific reference expression
    profiles.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    n_genes - int, optional, default 4000
        By default, computes correlations on the top 4000 SAM-weighted genes.
    """
    if labels is None:
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    weights = self.adata.var['weights'].values
    top = np.argsort(-weights)[:n_genes]
    # Standardize each of the top genes across cells, then scale each
    # gene by its SAM weight.
    scaled = StandardScaler().fit_transform(
        self.adata.layers['X_disp'][:, top].A) * weights[top]
    top_gene_names = np.array(list(self.adata.var_names))[top]

    markers = {}
    for lbl in np.unique(lbls):
        cluster_cells = np.array(list(self.adata.obs_names[lbls == lbl]))
        member = scaled[np.in1d(self.adata.obs_names, cluster_cells), :]
        # z-score each member cell's profile, then correlate against the
        # z-scored mean profile of the cluster.
        zcells = (member - member.mean(1)[:, None]) / member.std(1)[:, None]
        ref = member.mean(0)
        ref = (ref - ref.mean()) / ref.std()
        score = (zcells * ref).mean(0)
        markers[lbl] = top_gene_names[np.argsort(-score)]

    self.adata.uns['marker_genes_corr'] = markers
    return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.plot_correlated_groups | python | def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
geneID_groups = self.adata.uns['gene_groups']
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs) | Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L857-L885 | [
"def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):\n \"\"\"Display a gene's expressions.\n\n Displays a scatter plot using the SAM projection or another input\n projection with a particular gene's expressions overlaid.\n\n Parameters\n ----------\n gene - string\n a case-... | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
if isinstance(counts, tuple) or isinstance(counts, list):
raw_data, all_gene_names, all_cell_names = counts
if isinstance(raw_data, np.ndarray):
raw_data = sp.csr_matrix(raw_data)
self.adata_raw = AnnData(
X=raw_data, obs={
'obs_names': all_cell_names}, var={
'var_names': all_gene_names})
elif isinstance(counts, pd.DataFrame):
raw_data = sp.csr_matrix(counts.values)
all_gene_names = np.array(list(counts.columns.values))
all_cell_names = np.array(list(counts.index.values))
self.adata_raw = AnnData(
X=raw_data, obs={
'obs_names': all_cell_names}, var={
'var_names': all_gene_names})
elif isinstance(counts, AnnData):
all_cell_names=np.array(list(counts.obs_names))
all_gene_names=np.array(list(counts.var_names))
self.adata_raw = counts
elif counts is not None:
raise Exception(
"\'counts\' must be either a tuple/list of "
"(data,gene IDs,cell IDs) or a Pandas DataFrame of"
"cells x genes")
if(annotations is not None):
annotations = np.array(list(annotations))
if counts is not None:
self.adata_raw.obs['annotations'] = pd.Categorical(annotations)
if(counts is not None):
if(np.unique(all_gene_names).size != all_gene_names.size):
self.adata_raw.var_names_make_unique()
if(np.unique(all_cell_names).size != all_cell_names.size):
self.adata_raw.obs_names_make_unique()
self.adata = self.adata_raw.copy()
self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
include_genes=None, exclude_genes=None,
include_cells=None, exclude_cells=None,
norm='log', min_expression=1, thresh=0.01,
filter_genes=True):
"""Log-normalizes and filters the expression data.
Parameters
----------
div : float, optional, default 1
The factor by which the gene expression will be divided prior to
log normalization.
downsample : float, optional, default 0
The factor by which to randomly downsample the data. If 0, the
data will not be downsampled.
sum_norm : str or float, optional, default None
If a float, the total number of transcripts in each cell will be
normalized to this value prior to normalization and filtering.
Otherwise, nothing happens. If 'cell_median', each cell is
normalized to have the median total read count per cell. If
'gene_median', each gene is normalized to have the median total
read count per gene.
norm : str, optional, default 'log'
If 'log', log-normalizes the expression data. If 'ftt', applies the
Freeman-Tukey variance-stabilization transformation. If
'multinomial', applies the Pearson-residual transformation (this is
experimental and should only be used for raw, un-normalized UMI
datasets). If None, the data is not normalized.
include_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to keep.
All other genes will be filtered out. Gene names are case-
sensitive.
exclude_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to
exclude. These genes will be filtered out. Gene names are case-
sensitive.
include_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to keep.
All other cells will be filtered out. Cell names are
case-sensitive.
exclude_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to exclude.
Thses cells will be filtered out. Cell names are
case-sensitive.
min_expression : float, optional, default 1
The threshold above which a gene is considered
expressed. Gene expression values less than 'min_expression' are
set to zero.
thresh : float, optional, default 0.2
Keep genes expressed in greater than 'thresh'*100 % of cells and
less than (1-'thresh')*100 % of cells, where a gene is considered
expressed if its expression value exceeds 'min_expression'.
filter_genes : bool, optional, default True
Setting this to False turns off filtering operations aside from
removing genes with zero expression across all cells. Genes passed
in exclude_genes or not passed in include_genes will still be
filtered.
"""
# load data
try:
D= self.adata_raw.X
self.adata = self.adata_raw.copy()
except AttributeError:
print('No data loaded')
# filter cells
cell_names = np.array(list(self.adata_raw.obs_names))
idx_cells = np.arange(D.shape[0])
if(include_cells is not None):
include_cells = np.array(list(include_cells))
idx2 = np.where(np.in1d(cell_names, include_cells))[0]
idx_cells = np.array(list(set(idx2) & set(idx_cells)))
if(exclude_cells is not None):
exclude_cells = np.array(list(exclude_cells))
idx4 = np.where(np.in1d(cell_names, exclude_cells,
invert=True))[0]
idx_cells = np.array(list(set(idx4) & set(idx_cells)))
if downsample > 0:
numcells = int(D.shape[0] / downsample)
rand_ind = np.random.choice(np.arange(D.shape[0]),
size=numcells, replace=False)
idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
else:
numcells = D.shape[0]
mask_cells = np.zeros(D.shape[0], dtype='bool')
mask_cells[idx_cells] = True
self.adata = self.adata_raw[mask_cells,:].copy()
D = self.adata.X
if isinstance(D,np.ndarray):
D=sp.csr_matrix(D,dtype='float32')
else:
D=D.astype('float32')
D.sort_indices()
if(D.getformat() == 'csc'):
D=D.tocsr();
# sum-normalize
if (sum_norm == 'cell_median' and norm != 'multinomial'):
s = D.sum(1).A.flatten()
sum_norm = np.median(s)
D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
elif (sum_norm == 'gene_median' and norm != 'multinomial'):
s = D.sum(0).A.flatten()
sum_norm = np.median(s)
s[s==0]=1
D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
elif sum_norm is not None and norm != 'multinomial':
D = D.multiply(1 / D.sum(1).A.flatten()[:,
None] * sum_norm).tocsr()
# normalize
self.adata.X = D
if norm is None:
D.data[:] = (D.data / div)
elif(norm.lower() == 'log'):
D.data[:] = np.log2(D.data / div + 1)
elif(norm.lower() == 'ftt'):
D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
elif norm.lower() == 'multinomial':
ni = D.sum(1).A.flatten() #cells
pj = (D.sum(0) / D.sum()).A.flatten() #genes
col = D.indices
row=[]
for i in range(D.shape[0]):
row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
row = np.concatenate(row).astype('int32')
mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
mu2 = mu.copy()
mu2.data[:]=mu2.data**2
mu2 = mu2.multiply(1/ni[:,None])
mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
self.adata.X = mu
if sum_norm is None:
sum_norm = np.median(ni)
D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
D.data[:] = np.log2(D.data / div + 1)
else:
D.data[:] = (D.data / div)
# zero-out low-expressed genes
idx = np.where(D.data <= min_expression)[0]
D.data[idx] = 0
# filter genes
gene_names = np.array(list(self.adata.var_names))
idx_genes = np.arange(D.shape[1])
if(include_genes is not None):
include_genes = np.array(list(include_genes))
idx = np.where(np.in1d(gene_names, include_genes))[0]
idx_genes = np.array(list(set(idx) & set(idx_genes)))
if(exclude_genes is not None):
exclude_genes = np.array(list(exclude_genes))
idx3 = np.where(np.in1d(gene_names, exclude_genes,
invert=True))[0]
idx_genes = np.array(list(set(idx3) & set(idx_genes)))
if(filter_genes):
a, ct = np.unique(D.indices, return_counts=True)
c = np.zeros(D.shape[1])
c[a] = ct
keep = np.where(np.logical_and(c / D.shape[0] > thresh,
c / D.shape[0] <= 1 - thresh))[0]
idx_genes = np.array(list(set(keep) & set(idx_genes)))
mask_genes = np.zeros(D.shape[1], dtype='bool')
mask_genes[idx_genes] = True
self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
self.adata.X.eliminate_zeros()
self.adata.var['mask_genes']=mask_genes
if norm == 'multinomial':
self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
self.adata.layers['X_disp'].eliminate_zeros()
else:
self.adata.layers['X_disp'] = self.adata.X
def load_data(self, filename, transpose=True,
save_sparse_file='h5ad', sep=',', **kwargs):
"""Loads the specified data file. The file can be a table of
read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
as columns by default. The file can also be a pickle file (output from
'save_sparse_data') or an h5ad file (output from 'save_anndata').
This function that loads the file specified by 'filename'.
Parameters
----------
filename - string
The path to the tabular raw expression counts file.
sep - string, optional, default ','
The delimeter used to read the input data table. By default
assumes the input table is delimited by commas.
save_sparse_file - str, optional, default 'h5ad'
If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
(the native AnnData file format) to the same folder as the original
data for faster loading in the future. If 'p', pickles the sparse
data structure, cell names, and gene names in the same folder as
the original data for faster loading in the future.
transpose - bool, optional, default True
By default, assumes file is (genes x cells). Set this to False if
the file has dimensions (cells x genes).
"""
if filename.split('.')[-1] == 'p':
raw_data, all_cell_names, all_gene_names = (
pickle.load(open(filename, 'rb')))
if(transpose):
raw_data = raw_data.T
if raw_data.getformat()=='csc':
print("Converting sparse matrix to csr format...")
raw_data=raw_data.tocsr()
save_sparse_file = None
elif filename.split('.')[-1] != 'h5ad':
df = pd.read_csv(filename, sep=sep, index_col=0)
if(transpose):
dataset = df.T
else:
dataset = df
raw_data = sp.csr_matrix(dataset.values)
all_cell_names = np.array(list(dataset.index.values))
all_gene_names = np.array(list(dataset.columns.values))
if filename.split('.')[-1] != 'h5ad':
self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
var={'var_names': all_gene_names})
if(np.unique(all_gene_names).size != all_gene_names.size):
self.adata_raw.var_names_make_unique()
if(np.unique(all_cell_names).size != all_cell_names.size):
self.adata_raw.obs_names_make_unique()
self.adata = self.adata_raw.copy()
self.adata.layers['X_disp'] = raw_data
else:
self.adata_raw = anndata.read_h5ad(filename, **kwargs)
self.adata = self.adata_raw.copy()
if 'X_disp' not in list(self.adata.layers.keys()):
self.adata.layers['X_disp'] = self.adata.X
save_sparse_file = None
if(save_sparse_file == 'p'):
new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
path = filename[:filename.find(filename.split('/')[-1])]
self.save_sparse_data(path + new_sparse_file + '_sparse.p')
elif(save_sparse_file == 'h5ad'):
new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
path = filename[:filename.find(filename.split('/')[-1])]
self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
def load_annotations(self, aname, sep=','):
"""Loads cell annotations.
Loads the cell annoations specified by the 'aname' path.
Parameters
----------
aname - string
The path to the annotations file. First column should be cell IDs
and second column should be the desired annotations.
"""
ann = pd.read_csv(aname)
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
if(ann.shape[1] > 1):
ann = pd.read_csv(aname, index_col=0, sep=sep)
if(ann.shape[0] != all_cell_names.size):
ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
else:
if(ann.shape[0] != all_cell_names.size):
ann = pd.read_csv(aname, header=None, sep=sep)
ann.index = np.array(list(ann.index.astype('<U100')))
ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
ann2 = np.array(list(ann.values.flatten()))
self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
self.adata.obs['annotations'] = pd.Categorical(ann1)
    def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
        """Computes the spatial dispersion factors for each gene.
        Parameters
        ----------
        nnm - scipy.sparse, float
            Square cell-to-cell nearest-neighbor matrix.
        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This ensures
            that outlier genes do not significantly skew the weight
            distribution.
        Returns:
        -------
        weights - ndarray, float
            The vector of gene weights (also stored in adata.var['weights']).
        """
        # average expression over each cell's neighbors; the result is
        # stored in adata.layers['X_knn_avg']
        self.knn_avg(nnm)
        D_avg = self.adata.layers['X_knn_avg']
        # dispersion = variance / mean of the smoothed expression, per gene
        mu, var = sf.mean_variance_axis(D_avg, axis=0)
        dispersions = np.zeros(var.size)
        dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
        self.adata.var['spatial_dispersions'] = dispersions.copy()
        # clip at the mean of the top 'num_norm_avg' dispersions so extreme
        # outliers do not dominate the normalization below
        ma = np.sort(dispersions)[-num_norm_avg:].mean()
        dispersions[dispersions >= ma] = ma
        weights = ((dispersions / dispersions.max())**0.5).flatten()
        self.adata.var['weights'] = weights
        return weights
    def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
        """Computes the contribution of the gene IDs in 'genes' to each
        principal component (PC) of the filtered expression data as the mean of
        the absolute value of the corresponding gene loadings. High values
        correspond to PCs that are highly correlated with the features in
        'genes'. These PCs can then be regressed out of the data using
        'regress_genes'.
        Parameters
        ----------
        genes - numpy.array or list
            Genes for which contribution to each PC will be calculated.
        npcs - int, optional, default None
            How many PCs to calculate when computing PCA of the filtered and
            log-transformed expression data. If None, calculate all PCs.
        plot - bool, optional, default False
            If True, plot the scores reflecting how correlated each PC is with
            genes of interest. Otherwise, do not plot anything.
        Returns:
        -------
        x - numpy.array
            Scores reflecting how correlated each PC is with the genes of
            interest (ordered by decreasing eigenvalues). Returns None when
            'genes' is None (the fitted PCA is still stored for
            'regress_genes').
        """
        from sklearn.decomposition import PCA
        if npcs is None:
            npcs = self.adata.X.shape[0]
        pca = PCA(n_components=npcs)
        pc = pca.fit_transform(self.adata.X.toarray())
        # stored for later use by regress_genes
        self.regression_pca = pca
        self.regression_pcs = pc
        gene_names = np.array(list(self.adata.var_names))
        if(genes is not None):
            idx = np.where(np.in1d(gene_names, genes))[0]
            # mean absolute loading of the selected genes on each PC
            sx = pca.components_[:, idx]
            x = np.abs(sx).mean(1)
            if plot:
                plt.figure()
                plt.plot(x)
            return x
        else:
            return
    def regress_genes(self, PCs):
        """Regress out the principal components in 'PCs' from the filtered
        expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
        been previously called.
        Parameters
        ----------
        PCs - int, numpy.array, list
            The principal components to regress out of the expression data.
        """
        ind = [PCs]
        ind = np.array(ind).flatten()
        # Subtract the reconstruction from the selected PCs. The first form
        # rescales the loadings by the SAM gene weights; if that fails
        # (e.g. weights unavailable or shapes mismatch), fall back to the
        # unweighted reconstruction.
        try:
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :] * self.adata.var[
                    'weights'].values)
        except BaseException:
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :])
        self.adata.X = sp.csr_matrix(y)
def run(self,
max_iter=10,
verbose=True,
projection='umap',
stopping_condition=5e-3,
num_norm_avg=50,
k=20,
distance='correlation',
preprocessing='Normalizer',
proj_kwargs={}):
"""Runs the Self-Assembling Manifold algorithm.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when constructing cell distance
matrices. Can be any of the distance metrics supported by
sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
stopping_condition - float, optional, default 5e-3
The stopping condition threshold for the RMSE between gene weights
in adjacent iterations.
verbose - bool, optional, default True
If True, the iteration number and error between gene weights in
adjacent iterations will be displayed.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
preprocessing - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets and
'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
proj_kwargs - dict, optional, default {}
A dictionary of keyword arguments to pass to the projection
functions.
"""
self.distance = distance
D = self.adata.X
self.k = k
if(self.k < 5):
self.k = 5
elif(self.k > 100):
self.k = 100
if(self.k > D.shape[0] - 1):
self.k = D.shape[0] - 2
numcells = D.shape[0]
n_genes = 8000
if numcells > 3000 and n_genes > 3000:
n_genes = 3000
elif numcells > 2000 and n_genes > 4500:
n_genes = 4500
elif numcells > 1000 and n_genes > 6000:
n_genes = 6000
elif n_genes > 8000:
n_genes = 8000
npcs = None
if npcs is None and numcells > 3000:
npcs = 150
elif npcs is None and numcells > 2000:
npcs = 250
elif npcs is None and numcells > 1000:
npcs = 350
elif npcs is None:
npcs = 500
tinit = time.time()
edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
nums = np.arange(edm.shape[1])
RINDS = np.random.randint(
0, numcells, (self.k - 1) * numcells).reshape((numcells,
(self.k - 1)))
RINDS = np.hstack((nums[:, None], RINDS))
edm[np.tile(np.arange(RINDS.shape[0])[:, None],
(1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
edm = edm.tocsr()
print('RUNNING SAM')
W = self.dispersion_ranking_NN(
edm, num_norm_avg=1)
old = np.zeros(W.size)
new = W
i = 0
err = ((new - old)**2).mean()**0.5
if max_iter < 5:
max_iter = 5
nnas = num_norm_avg
self.Ns=[edm]
self.Ws = [W]
while (i < max_iter and err > stopping_condition):
conv = err
if(verbose):
print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))
i += 1
old = new
W, wPCA_data, EDM, = self.calculate_nnm(
D, W, n_genes, preprocessing, npcs, numcells, nnas)
new = W
err = ((new - old)**2).mean()**0.5
self.Ns.append(EDM)
self.Ws.append(W)
W, wPCA_data, EDM, = self.calculate_nnm(
D, W, n_genes, preprocessing, npcs, numcells, nnas)
self.Ns.append(EDM)
all_gene_names = np.array(list(self.adata.var_names))
indices = np.argsort(-W)
ranked_genes = all_gene_names[indices]
self.corr_bin_genes(number_of_features=1000)
self.adata.uns['ranked_genes'] = ranked_genes
self.adata.obsm['X_pca'] = wPCA_data
self.adata.uns['neighbors'] = {}
self.adata.uns['neighbors']['connectivities'] = EDM
if(projection == 'tsne'):
print('Computing the t-SNE embedding...')
self.run_tsne(**proj_kwargs)
elif(projection == 'umap'):
print('Computing the UMAP embedding...')
self.run_umap(**proj_kwargs)
elif(projection == 'diff_umap'):
print('Computing the diffusion UMAP embedding...')
self.run_diff_umap(**proj_kwargs)
elapsed = time.time() - tinit
if verbose:
print('Elapsed time: ' + str(elapsed) + ' seconds')
    def calculate_nnm(
            self,
            D,
            W,
            n_genes,
            preprocessing,
            npcs,
            numcells,
            num_norm_avg):
        """One SAM iteration: weight genes, reduce, rebuild the graph.

        Keeps the top 'n_genes' genes by weight, normalizes the expression
        submatrix according to 'preprocessing', performs weighted PCA, builds
        the nearest-neighbor graph in PC space, and recomputes the gene
        weights from that graph.

        Returns (weights, weighted PCA coordinates, neighbor matrix).
        """
        if(n_genes is None):
            gkeep = np.arange(W.size)
        else:
            gkeep = np.sort(np.argsort(-W)[:n_genes])
        if preprocessing == 'Normalizer':
            Ds = D[:, gkeep].toarray()
            Ds = Normalizer().fit_transform(Ds)
        elif preprocessing == 'StandardScaler':
            Ds = D[:, gkeep].toarray()
            Ds = StandardScaler(with_mean=True).fit_transform(Ds)
            # clamp extreme z-scores
            Ds[Ds > 5] = 5
            Ds[Ds < -5] = -5
        else:
            Ds = D[:, gkeep].toarray()
        # rescale each retained gene by its SAM weight
        D_sub = Ds * (W[gkeep])
        # 'full' SVD solver is only affordable on small datasets
        if numcells > 500:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='auto')
        else:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='full')
        if self.distance == 'euclidean':
            # unit-normalize rows so euclidean distances ignore magnitude
            g_weighted = Normalizer().fit_transform(g_weighted)
        self.adata.uns['pca_obj'] = pca
        EDM = self.calc_nnm(g_weighted)
        W = self.dispersion_ranking_NN(
            EDM, num_norm_avg=num_norm_avg)
        self.adata.uns['X_processed'] = D_sub
        return W, g_weighted, EDM
    def calc_nnm(self,g_weighted):
        """Build the k-nearest-neighbor adjacency matrix in PC space.

        For large datasets (>8000 cells) a neighbor-search helper is used;
        otherwise exact pairwise distances are computed. Returns a sparse
        binary cell-by-cell adjacency matrix.
        """
        numcells=g_weighted.shape[0]
        if g_weighted.shape[0] > 8000:
            nnm, dists = ut.nearest_neighbors(
                g_weighted, n_neighbors=self.k, metric=self.distance)
            # scatter neighbor indices into a sparse 0/1 adjacency matrix
            EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
            EDM[np.tile(np.arange(nnm.shape[0])[:, None],
                        (1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
            EDM = EDM.tocsr()
        else:
            dist = ut.compute_distances(g_weighted, self.distance)
            nnm = ut.dist_to_nn(dist, self.k)
            EDM = sp.csr_matrix(nnm)
        return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
"""A (hacky) method for binning groups of genes correlated along the
SAM manifold.
Parameters
----------
number_of_features - int, optional, default None
The number of genes to bin. Capped at 5000 due to memory
considerations.
input_gene - str, optional, default None
If not None, use this gene as the first seed when growing the
correlation bins.
"""
weights = self.adata.var['spatial_dispersions'].values
all_gene_names = np.array(list(self.adata.var_names))
D_avg = self.adata.layers['X_knn_avg']
idx2 = np.argsort(-weights)[:weights[weights > 0].size]
if(number_of_features is None or number_of_features > idx2.size):
number_of_features = idx2.size
if number_of_features > 1000:
number_of_features = 1000
if(input_gene is not None):
input_gene = np.where(all_gene_names == input_gene)[0]
if(input_gene.size == 0):
print(
"Gene note found in the filtered dataset. Note "
"that genes are case sensitive.")
return
seeds = [np.array([input_gene])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(all_gene_names[seeds[i]])
return geneID_groups[0]
else:
seeds = [np.array([idx2[0]])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(
all_gene_names[seeds[i]])
self.adata.uns['gene_groups'] = geneID_groups
return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
"""Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' is set to 'precomputed' by default,
implying that this function expects a distance matrix by default.
"""
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d
def run_umap(self, X=None, metric=None, **kwargs):
"""Wrapper for umap-learn.
See https://github.com/lmcinnes/umap sklearn for the documentation
and source code.
"""
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d
    def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
                      method='gauss', **kwargs):
        """
        Experimental -- running UMAP on the diffusion components
        Computes a diffusion map from the chosen representation and embeds
        the diffusion components with scanpy's UMAP. Any existing UMAP
        embedding is preserved in adata.obsm['X_umap_sam'].
        """
        # NOTE(review): the 'metric' parameter is not used below (the graph
        # is built with self.distance, then 'euclidean') — confirm intent
        import scanpy.api as sc
        # neighbor graph on the chosen representation using SAM's metric
        sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
                        metric=self.distance,method=method)
        sc.tl.diffmap(self.adata, n_comps=n_comps)
        # rebuild the graph in diffusion space before embedding
        sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
                        metric='euclidean',method=method)
        if 'X_umap' in self.adata.obsm.keys():
            # keep the previous SAM UMAP; scanpy will overwrite 'X_umap'
            self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
        sc.tl.umap(self.adata,min_dist=0.1,copy=False)
def knn_avg(self, nnm=None):
if (nnm is None):
nnm = self.adata.uns['neighbors']['connectivities']
D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])
self.adata.layers['X_knn_avg'] = D_avg
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
"""Display a scatter plot.
Displays a scatter plot using the SAM projection or another input
projection with or without annotations.
Parameters
----------
projection - ndarray of floats, optional, default None
An N x 2 matrix, where N is the number of data points. If None,
use an existing SAM projection (default t-SNE). Can take on values
'umap' or 'tsne' to specify either the SAM UMAP embedding or
SAM t-SNE embedding.
c - ndarray or str, optional, default None
Colors for each cell in the scatter plot. Can be a vector of
floats or strings for cell annotations. Can also be a key
for sam.adata.obs (i.e. 'louvain_clusters').
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
cmap - string, optional, default 'rainbow'
The colormap to use for the input color values.
colorbar - bool, optional default True
If True, display a colorbar indicating which values / annotations
correspond to which color in the scatter plot.
Keyword arguments -
All other keyword arguments that can be passed into
matplotlib.pyplot.scatter can be used.
"""
if (not PLOTTING):
print("matplotlib not installed!")
else:
if(isinstance(projection, str)):
try:
dt = self.adata.obsm[projection]
except KeyError:
print('Please create a projection first using run_umap or'
'run_tsne')
elif(projection is None):
try:
dt = self.adata.obsm['X_umap']
except KeyError:
try:
dt = self.adata.obsm['X_tsne']
except KeyError:
print("Please create either a t-SNE or UMAP projection"
"first.")
return
else:
dt = projection
if(axes is None):
plt.figure()
axes = plt.gca()
if(c is None):
plt.scatter(dt[:, 0], dt[:, 1], s=s,
linewidth=linewidth, edgecolor=edgecolor, **kwargs)
else:
if isinstance(c, str):
try:
c = self.adata.obs[c].get_values()
except KeyError:
0 # do nothing
if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
(isinstance(c, np.ndarray) or isinstance(c, list))):
i = ut.convert_annotations(c)
ui, ai = np.unique(i, return_index=True)
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
cbar = plt.colorbar(cax, ax=axes, ticks=ui)
cbar.ax.set_yticklabels(c[ai])
else:
if not (isinstance(c, np.ndarray) or isinstance(c, list)):
colorbar = False
i = c
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name)
    def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
        """Cluster cells with DBSCAN, then assign noise points by kNN vote.

        If X is None, clusters the stored UMAP embedding and saves the
        labels in adata.obs['density_clusters']; otherwise clusters X and
        returns the label vector.
        """
        from sklearn.cluster import DBSCAN
        if X is None:
            X = self.adata.obsm['X_umap']
            save = True
        else:
            save = False
        cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        # DBSCAN labels outliers -1; reassign each outlier to the majority
        # cluster among its k nearest clustered neighbors
        if idx1.size > 0 and idx0.size > 0:
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
            cl[idx1] = np.argmax(nnmc, axis=1)
        if save:
            self.adata.obs['density_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def louvain_clustering(self, X=None, res=1, method='modularity'):
        """Runs Louvain clustering using the vtraag implementation. Assumes
        that 'louvain' optional dependency is installed.
        Parameters
        ----------
        res - float, optional, default 1
            The resolution parameter which tunes the number of clusters Louvain
            finds.
        method - str, optional, default 'modularity'
            Can be 'modularity' or 'significance', which are two different
            optimizing functions in the Louvain algorithm.
        """
        if X is None:
            X = self.adata.uns['neighbors']['connectivities']
            save = True
        else:
            if not sp.isspmatrix_csr(X):
                X = sp.csr_matrix(X)
            save = False
        import igraph as ig
        import louvain
        # shared-neighbor adjacency: keep the k strongest links per cell
        adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
        sources, targets = adjacency.nonzero()
        weights = adjacency[sources, targets]
        if isinstance(weights, np.matrix):
            weights = weights.A1
        g = ig.Graph(directed=True)
        g.add_vertices(adjacency.shape[0])
        g.add_edges(list(zip(sources, targets)))
        try:
            g.es['weight'] = weights
        except BaseException:
            # fall back to an unweighted graph if weights cannot be set
            pass
        if method == 'significance':
            cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
        else:
            cl = louvain.find_partition(
                g,
                louvain.RBConfigurationVertexPartition,
                resolution_parameter=res)
        if save:
            self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
        else:
            return np.array(cl.membership)
    def kmeans_clustering(self, numc, X=None, npcs=15):
        """Performs k-means clustering.
        Parameters
        ----------
        numc - int
            Number of clusters
        npcs - int, optional, default 15
            Number of principal components to use as input for k-means
            clustering.
        """
        from sklearn.cluster import KMeans
        if X is None:
            # project the processed expression data onto the top 'npcs' PCs
            D_sub = self.adata.uns['X_processed']
            X = (
                D_sub -
                D_sub.mean(0)).dot(
                self.adata.uns['pca_obj'].components_[
                    :npcs,
                    :].T)
            save = True
        else:
            save = False
        # rows are unit-normalized before clustering
        cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
        if save:
            self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def leiden_clustering(self, X=None, res = 1):
        """Runs Leiden clustering via scanpy.

        If X is None, clusters the stored neighbor graph and saves the
        labels in adata.obs['leiden_clusters']; otherwise uses X as the
        adjacency matrix and saves labels in adata.obs['leiden_clusters_X'].
        Parameters
        ----------
        res - float, optional, default 1
            Resolution parameter controlling the number of clusters.
        """
        import scanpy.api as sc
        if X is None:
            sc.tl.leiden(self.adata, resolution = res,
                         key_added='leiden_clusters')
            # re-store the labels as integer categoricals
            self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
                'leiden_clusters'].get_values().astype('int'))
        else:
            sc.tl.leiden(self.adata, resolution = res, adjacency = X,
                         key_added='leiden_clusters_X')
            self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
                'leiden_clusters_X'].get_values().astype('int'))
    def hdbknn_clustering(self, X=None, k=None, **kwargs):
        """Cluster cells with HDBSCAN, then assign noise points by kNN vote.

        If X is None, clusters a 15-PC projection of the processed data and
        saves labels in adata.obs['hdbknn_clusters']; otherwise clusters X
        and returns the label vector.
        """
        import hdbscan
        if X is None:
            #X = self.adata.obsm['X_pca']
            D = self.adata.uns['X_processed']
            X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
            X = Normalizer().fit_transform(X)
            save = True
        else:
            save = False
        if k is None:
            k = 20  # NOTE(review): fixed at 20 rather than self.k — confirm
        hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
        cl = hdb.fit_predict(X)
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        # HDBSCAN labels outliers -1; reassign each outlier to the majority
        # cluster among its k nearest clustered neighbors
        if idx1.size > 0 and idx0.size > 0:
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
            cl[idx1] = np.argmax(nnmc, axis=1)
        if save:
            self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
        else:
            return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
"""
Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers
def identify_marker_genes_ratio(self, labels=None):
"""
Ranks marker genes for each cluster using a SAM-weighted
expression-ratio approach (works quite well).
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
all_gene_names = np.array(list(self.adata.var_names))
markers={}
s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
lblsu=np.unique(lbls)
for i in lblsu:
d = np.array(self.adata.layers['X_disp']
[lbls == i, :].sum(0)).flatten()
rat = np.zeros(d.size)
rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
self.adata.var['weights'].values[s > 0]
x = np.argsort(-rat)
markers[i] = all_gene_names[x[:]]
self.adata.uns['marker_genes_ratio'] = markers
return markers
    def identify_marker_genes_corr(self, labels=None, n_genes=4000):
        """
        Ranking marker genes based on their respective magnitudes in the
        correlation dot products with cluster-specific reference expression
        profiles.
        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.
        n_genes - int, optional, default 4000
            By default, computes correlations on the top 4000 SAM-weighted genes.
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels
        # z-score the top-weighted genes and rescale by the SAM weights
        w=self.adata.var['weights'].values
        s = StandardScaler()
        idxg = np.argsort(-w)[:n_genes]
        y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
        all_gene_names = np.array(list(self.adata.var_names))[idxg]
        markers = {}
        lblsu=np.unique(lbls)
        for i in lblsu:
            Gcells = np.array(list(self.adata.obs_names[lbls==i]))
            # per-cell standardized profiles within this cluster
            z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
            m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
            # standardized cluster-mean reference profile
            ref = z1.mean(0)
            ref = (ref-ref.mean())/ref.std()
            # mean correlation-style dot product of each gene with the
            # reference profile; higher = stronger marker
            g2 = (m1*ref).mean(0)
            markers[i] = all_gene_names[np.argsort(-g2)]
        self.adata.uns['marker_genes_corr'] = markers
        return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.plot_correlated_genes | python | def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:] | Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L887-L924 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
    def __init__(self, counts=None, annotations=None):
        """Build `adata_raw` (and a working copy `adata`) from the input.

        See the class docstring for the accepted `counts` formats.
        """
        # tuple/list input: (matrix, gene IDs, cell IDs)
        if isinstance(counts, tuple) or isinstance(counts, list):
            raw_data, all_gene_names, all_cell_names = counts
            if isinstance(raw_data, np.ndarray):
                raw_data = sp.csr_matrix(raw_data)
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        # DataFrame input: cells x genes with labeled axes
        elif isinstance(counts, pd.DataFrame):
            raw_data = sp.csr_matrix(counts.values)
            all_gene_names = np.array(list(counts.columns.values))
            all_cell_names = np.array(list(counts.index.values))
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        # AnnData input is adopted as-is
        elif isinstance(counts, AnnData):
            all_cell_names=np.array(list(counts.obs_names))
            all_gene_names=np.array(list(counts.var_names))
            self.adata_raw = counts
        elif counts is not None:
            raise Exception(
                "\'counts\' must be either a tuple/list of "
                "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
                "cells x genes")
        if(annotations is not None):
            annotations = np.array(list(annotations))
            if counts is not None:
                self.adata_raw.obs['annotations'] = pd.Categorical(annotations)
        if(counts is not None):
            # deduplicate IDs, then keep a working copy whose raw values are
            # preserved in the 'X_disp' layer
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()
            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
include_genes=None, exclude_genes=None,
include_cells=None, exclude_cells=None,
norm='log', min_expression=1, thresh=0.01,
filter_genes=True):
"""Log-normalizes and filters the expression data.
Parameters
----------
div : float, optional, default 1
The factor by which the gene expression will be divided prior to
log normalization.
downsample : float, optional, default 0
The factor by which to randomly downsample the data. If 0, the
data will not be downsampled.
sum_norm : str or float, optional, default None
If a float, the total number of transcripts in each cell will be
normalized to this value prior to normalization and filtering.
Otherwise, nothing happens. If 'cell_median', each cell is
normalized to have the median total read count per cell. If
'gene_median', each gene is normalized to have the median total
read count per gene.
norm : str, optional, default 'log'
If 'log', log-normalizes the expression data. If 'ftt', applies the
Freeman-Tukey variance-stabilization transformation. If
'multinomial', applies the Pearson-residual transformation (this is
experimental and should only be used for raw, un-normalized UMI
datasets). If None, the data is not normalized.
include_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to keep.
All other genes will be filtered out. Gene names are case-
sensitive.
exclude_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to
exclude. These genes will be filtered out. Gene names are case-
sensitive.
include_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to keep.
All other cells will be filtered out. Cell names are
case-sensitive.
exclude_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to exclude.
Thses cells will be filtered out. Cell names are
case-sensitive.
min_expression : float, optional, default 1
The threshold above which a gene is considered
expressed. Gene expression values less than 'min_expression' are
set to zero.
thresh : float, optional, default 0.2
Keep genes expressed in greater than 'thresh'*100 % of cells and
less than (1-'thresh')*100 % of cells, where a gene is considered
expressed if its expression value exceeds 'min_expression'.
filter_genes : bool, optional, default True
Setting this to False turns off filtering operations aside from
removing genes with zero expression across all cells. Genes passed
in exclude_genes or not passed in include_genes will still be
filtered.
"""
# load data
try:
D= self.adata_raw.X
self.adata = self.adata_raw.copy()
except AttributeError:
print('No data loaded')
# filter cells
cell_names = np.array(list(self.adata_raw.obs_names))
idx_cells = np.arange(D.shape[0])
if(include_cells is not None):
include_cells = np.array(list(include_cells))
idx2 = np.where(np.in1d(cell_names, include_cells))[0]
idx_cells = np.array(list(set(idx2) & set(idx_cells)))
if(exclude_cells is not None):
exclude_cells = np.array(list(exclude_cells))
idx4 = np.where(np.in1d(cell_names, exclude_cells,
invert=True))[0]
idx_cells = np.array(list(set(idx4) & set(idx_cells)))
if downsample > 0:
numcells = int(D.shape[0] / downsample)
rand_ind = np.random.choice(np.arange(D.shape[0]),
size=numcells, replace=False)
idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
else:
numcells = D.shape[0]
mask_cells = np.zeros(D.shape[0], dtype='bool')
mask_cells[idx_cells] = True
self.adata = self.adata_raw[mask_cells,:].copy()
D = self.adata.X
if isinstance(D,np.ndarray):
D=sp.csr_matrix(D,dtype='float32')
else:
D=D.astype('float32')
D.sort_indices()
if(D.getformat() == 'csc'):
D=D.tocsr();
# sum-normalize
if (sum_norm == 'cell_median' and norm != 'multinomial'):
s = D.sum(1).A.flatten()
sum_norm = np.median(s)
D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
elif (sum_norm == 'gene_median' and norm != 'multinomial'):
s = D.sum(0).A.flatten()
sum_norm = np.median(s)
s[s==0]=1
D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
elif sum_norm is not None and norm != 'multinomial':
D = D.multiply(1 / D.sum(1).A.flatten()[:,
None] * sum_norm).tocsr()
# normalize
self.adata.X = D
if norm is None:
D.data[:] = (D.data / div)
elif(norm.lower() == 'log'):
D.data[:] = np.log2(D.data / div + 1)
elif(norm.lower() == 'ftt'):
D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
elif norm.lower() == 'multinomial':
ni = D.sum(1).A.flatten() #cells
pj = (D.sum(0) / D.sum()).A.flatten() #genes
col = D.indices
row=[]
for i in range(D.shape[0]):
row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
row = np.concatenate(row).astype('int32')
mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
mu2 = mu.copy()
mu2.data[:]=mu2.data**2
mu2 = mu2.multiply(1/ni[:,None])
mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
self.adata.X = mu
if sum_norm is None:
sum_norm = np.median(ni)
D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
D.data[:] = np.log2(D.data / div + 1)
else:
D.data[:] = (D.data / div)
# zero-out low-expressed genes
idx = np.where(D.data <= min_expression)[0]
D.data[idx] = 0
# filter genes
gene_names = np.array(list(self.adata.var_names))
idx_genes = np.arange(D.shape[1])
if(include_genes is not None):
include_genes = np.array(list(include_genes))
idx = np.where(np.in1d(gene_names, include_genes))[0]
idx_genes = np.array(list(set(idx) & set(idx_genes)))
if(exclude_genes is not None):
exclude_genes = np.array(list(exclude_genes))
idx3 = np.where(np.in1d(gene_names, exclude_genes,
invert=True))[0]
idx_genes = np.array(list(set(idx3) & set(idx_genes)))
if(filter_genes):
a, ct = np.unique(D.indices, return_counts=True)
c = np.zeros(D.shape[1])
c[a] = ct
keep = np.where(np.logical_and(c / D.shape[0] > thresh,
c / D.shape[0] <= 1 - thresh))[0]
idx_genes = np.array(list(set(keep) & set(idx_genes)))
mask_genes = np.zeros(D.shape[1], dtype='bool')
mask_genes[idx_genes] = True
self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
self.adata.X.eliminate_zeros()
self.adata.var['mask_genes']=mask_genes
if norm == 'multinomial':
self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
self.adata.layers['X_disp'].eliminate_zeros()
else:
self.adata.layers['X_disp'] = self.adata.X
    def load_data(self, filename, transpose=True,
                  save_sparse_file='h5ad', sep=',', **kwargs):
        """Loads the specified data file. The file can be a table of
        read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
        as columns by default. The file can also be a pickle file (output from
        'save_sparse_data') or an h5ad file (output from 'save_anndata').

        Parameters
        ----------
        filename - string
            The path to the tabular raw expression counts file.

        sep - string, optional, default ','
            The delimiter used to read the input data table. By default
            assumes the input table is delimited by commas.

        save_sparse_file - str, optional, default 'h5ad'
            If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
            (the native AnnData file format) to the same folder as the original
            data for faster loading in the future. If 'p', pickles the sparse
            data structure, cell names, and gene names in the same folder as
            the original data for faster loading in the future.

        transpose - bool, optional, default True
            By default, assumes file is (genes x cells). Set this to False if
            the file has dimensions (cells x genes).
        """
        # Dispatch on file extension: '.p' pickle, '.h5ad' AnnData, anything
        # else is parsed as a delimited text table.
        if filename.split('.')[-1] == 'p':
            raw_data, all_cell_names, all_gene_names = (
                pickle.load(open(filename, 'rb')))
            if(transpose):
                raw_data = raw_data.T
                if raw_data.getformat()=='csc':
                    print("Converting sparse matrix to csr format...")
                    raw_data=raw_data.tocsr()
            # already re-saved once; no need to cache again
            save_sparse_file = None
        elif filename.split('.')[-1] != 'h5ad':
            df = pd.read_csv(filename, sep=sep, index_col=0)
            if(transpose):
                dataset = df.T
            else:
                dataset = df
            raw_data = sp.csr_matrix(dataset.values)
            all_cell_names = np.array(list(dataset.index.values))
            all_gene_names = np.array(list(dataset.columns.values))
        if filename.split('.')[-1] != 'h5ad':
            # build adata_raw from the parsed matrix and names
            self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                     var={'var_names': all_gene_names})
            # deduplicate names for unambiguous AnnData indexing
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()
            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = raw_data
        else:
            # h5ad files are loaded directly into AnnData
            self.adata_raw = anndata.read_h5ad(filename, **kwargs)
            self.adata = self.adata_raw.copy()
            if 'X_disp' not in list(self.adata.layers.keys()):
                self.adata.layers['X_disp'] = self.adata.X
            save_sparse_file = None
        # optionally cache a faster-loading copy next to the original file
        if(save_sparse_file == 'p'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_sparse_data(path + new_sparse_file + '_sparse.p')
        elif(save_sparse_file == 'h5ad'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
    def load_annotations(self, aname, sep=','):
        """Loads cell annotations.

        Loads the cell annotations specified by the 'aname' path.

        Parameters
        ----------
        aname - string
            The path to the annotations file. First column should be cell IDs
            and second column should be the desired annotations.
        """
        ann = pd.read_csv(aname)

        cell_names = np.array(list(self.adata.obs_names))
        all_cell_names = np.array(list(self.adata_raw.obs_names))

        # Re-read the file with different header/index assumptions until the
        # row count matches the number of cells in the raw data.
        if(ann.shape[1] > 1):
            ann = pd.read_csv(aname, index_col=0, sep=sep)
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
        else:
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, header=None, sep=sep)
        # force string index so label-based selection below matches obs_names
        ann.index = np.array(list(ann.index.astype('<U100')))
        # ann1: annotations aligned to the filtered cells; ann2: all cells
        ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
        ann2 = np.array(list(ann.values.flatten()))
        self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
        self.adata.obs['annotations'] = pd.Categorical(ann1)
    def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
        """Computes the spatial dispersion factors for each gene.

        Parameters
        ----------
        nnm - scipy.sparse, float
            Square cell-to-cell nearest-neighbor matrix.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This ensures
            that outlier genes do not significantly skew the weight
            distribution.

        Returns:
        -------
        weights - ndarray, float
            The vector of gene weights.
        """
        # smooth expression over the neighbor graph first
        self.knn_avg(nnm)

        D_avg = self.adata.layers['X_knn_avg']

        # per-gene dispersion (variance / mean) of the kNN-averaged data
        mu, var = sf.mean_variance_axis(D_avg, axis=0)

        dispersions = np.zeros(var.size)
        dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]

        self.adata.var['spatial_dispersions'] = dispersions.copy()

        # clip at the mean of the top 'num_norm_avg' dispersions so a few
        # outlier genes cannot dominate, then rescale to [0, 1] and sqrt
        ma = np.sort(dispersions)[-num_norm_avg:].mean()
        dispersions[dispersions >= ma] = ma

        weights = ((dispersions / dispersions.max())**0.5).flatten()

        self.adata.var['weights'] = weights

        return weights
    def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
        """Computes the contribution of the gene IDs in 'genes' to each
        principal component (PC) of the filtered expression data as the mean of
        the absolute value of the corresponding gene loadings. High values
        correspond to PCs that are highly correlated with the features in
        'genes'. These PCs can then be regressed out of the data using
        'regress_genes'.

        Parameters
        ----------
        genes - numpy.array or list
            Genes for which contribution to each PC will be calculated.

        npcs - int, optional, default None
            How many PCs to calculate when computing PCA of the filtered and
            log-transformed expression data. If None, calculate all PCs.

        plot - bool, optional, default False
            If True, plot the scores reflecting how correlated each PC is with
            genes of interest. Otherwise, do not plot anything.

        Returns:
        -------
        x - numpy.array
            Scores reflecting how correlated each PC is with the genes of
            interest (ordered by decreasing eigenvalues).
        """
        from sklearn.decomposition import PCA
        if npcs is None:
            # NOTE(review): uses the number of cells as the component count;
            # presumably intended as "all PCs" -- confirm against PCA limits.
            npcs = self.adata.X.shape[0]

        pca = PCA(n_components=npcs)
        pc = pca.fit_transform(self.adata.X.toarray())

        # cache the fitted PCA and scores for later use by 'regress_genes'
        self.regression_pca = pca
        self.regression_pcs = pc

        gene_names = np.array(list(self.adata.var_names))
        if(genes is not None):
            idx = np.where(np.in1d(gene_names, genes))[0]
            sx = pca.components_[:, idx]
            # score each PC by the mean absolute loading of the query genes
            x = np.abs(sx).mean(1)

            if plot:
                plt.figure()
                plt.plot(x)

            return x
        else:
            return
    def regress_genes(self, PCs):
        """Regress out the principal components in 'PCs' from the filtered
        expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
        been previously called.

        Parameters
        ----------
        PCs - int, numpy.array, list
            The principal components to regress out of the expression data.
        """
        # normalize 'PCs' into a flat index array whether it is a scalar,
        # list, or array
        ind = [PCs]
        ind = np.array(ind).flatten()
        try:
            # prefer components scaled by the SAM gene weights
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :] * self.adata.var[
                    'weights'].values)
        except BaseException:
            # NOTE(review): overly broad catch -- presumably falls back when
            # 'weights' has not been computed; verify the intended exception.
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :])

        self.adata.X = sp.csr_matrix(y)
    def run(self,
            max_iter=10,
            verbose=True,
            projection='umap',
            stopping_condition=5e-3,
            num_norm_avg=50,
            k=20,
            distance='correlation',
            preprocessing='Normalizer',
            proj_kwargs={}):
        """Runs the Self-Assembling Manifold algorithm.

        Parameters
        ----------
        k - int, optional, default 20
            The number of nearest neighbors to identify for each cell.

        distance : string, optional, default 'correlation'
            The distance metric to use when constructing cell distance
            matrices. Can be any of the distance metrics supported by
            sklearn's 'pdist'.

        max_iter - int, optional, default 10
            The maximum number of iterations SAM will run.

        stopping_condition - float, optional, default 5e-3
            The stopping condition threshold for the RMSE between gene weights
            in adjacent iterations.

        verbose - bool, optional, default True
            If True, the iteration number and error between gene weights in
            adjacent iterations will be displayed.

        projection - str, optional, default 'umap'
            If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
            embedding. Otherwise, no embedding will be generated.

        preprocessing - str, optional, default 'Normalizer'
            If 'Normalizer', use sklearn.preprocessing.Normalizer, which
            normalizes expression data prior to PCA such that each cell has
            unit L2 norm. If 'StandardScaler', use
            sklearn.preprocessing.StandardScaler, which normalizes expression
            data prior to PCA such that each gene has zero mean and unit
            variance. Otherwise, do not normalize the expression data. We
            recommend using 'StandardScaler' for large datasets and
            'Normalizer' otherwise.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This prevents
            genes with large spatial dispersions from skewing the distribution
            of weights.

        proj_kwargs - dict, optional, default {}
            A dictionary of keyword arguments to pass to the projection
            functions.
        """
        # NOTE(review): 'proj_kwargs={}' is a mutable default argument; it is
        # only read here, but this is a known Python pitfall.
        self.distance = distance
        D = self.adata.X
        self.k = k
        # clamp k to [5, 100] and to the dataset size
        if(self.k < 5):
            self.k = 5
        elif(self.k > 100):
            self.k = 100

        if(self.k > D.shape[0] - 1):
            self.k = D.shape[0] - 2

        numcells = D.shape[0]

        # heuristics: use fewer genes and PCs for larger datasets
        n_genes = 8000
        if numcells > 3000 and n_genes > 3000:
            n_genes = 3000
        elif numcells > 2000 and n_genes > 4500:
            n_genes = 4500
        elif numcells > 1000 and n_genes > 6000:
            n_genes = 6000
        elif n_genes > 8000:
            n_genes = 8000

        npcs = None
        if npcs is None and numcells > 3000:
            npcs = 150
        elif npcs is None and numcells > 2000:
            npcs = 250
        elif npcs is None and numcells > 1000:
            npcs = 350
        elif npcs is None:
            npcs = 500

        tinit = time.time()

        # seed with a random k-NN graph: each cell gets itself plus k-1
        # randomly chosen neighbors
        edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        nums = np.arange(edm.shape[1])
        RINDS = np.random.randint(
            0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                           (self.k - 1)))
        RINDS = np.hstack((nums[:, None], RINDS))

        edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                    (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
        edm = edm.tocsr()

        print('RUNNING SAM')

        # initial gene weights from the random graph
        W = self.dispersion_ranking_NN(
            edm, num_norm_avg=1)

        old = np.zeros(W.size)
        new = W

        i = 0
        err = ((new - old)**2).mean()**0.5

        if max_iter < 5:
            max_iter = 5

        nnas = num_norm_avg
        # keep a history of neighbor graphs and weight vectors per iteration
        self.Ns = [edm]
        self.Ws = [W]

        # iterate until the RMSE between successive weight vectors drops
        # below the stopping condition (or max_iter is reached)
        while (i < max_iter and err > stopping_condition):
            conv = err
            if(verbose):
                print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))

            i += 1
            old = new

            W, wPCA_data, EDM, = self.calculate_nnm(
                D, W, n_genes, preprocessing, npcs, numcells, nnas)
            new = W
            err = ((new - old)**2).mean()**0.5

            self.Ns.append(EDM)
            self.Ws.append(W)

        # one final pass with the converged weights
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        self.Ns.append(EDM)
        all_gene_names = np.array(list(self.adata.var_names))
        indices = np.argsort(-W)
        ranked_genes = all_gene_names[indices]

        self.corr_bin_genes(number_of_features=1000)

        self.adata.uns['ranked_genes'] = ranked_genes

        self.adata.obsm['X_pca'] = wPCA_data
        self.adata.uns['neighbors'] = {}
        self.adata.uns['neighbors']['connectivities'] = EDM

        if(projection == 'tsne'):
            print('Computing the t-SNE embedding...')
            self.run_tsne(**proj_kwargs)
        elif(projection == 'umap'):
            print('Computing the UMAP embedding...')
            self.run_umap(**proj_kwargs)
        elif(projection == 'diff_umap'):
            print('Computing the diffusion UMAP embedding...')
            self.run_diff_umap(**proj_kwargs)

        elapsed = time.time() - tinit
        if verbose:
            print('Elapsed time: ' + str(elapsed) + ' seconds')
    def calculate_nnm(
            self,
            D,
            W,
            n_genes,
            preprocessing,
            npcs,
            numcells,
            num_norm_avg):
        """One SAM iteration: select the top 'n_genes' weighted genes,
        compute a weighted PCA, rebuild the nearest-neighbor graph, and
        recompute the gene weights.

        Returns (weights, weighted PCA coordinates, neighbor matrix).
        """
        if(n_genes is None):
            gkeep = np.arange(W.size)
        else:
            # indices of the top-weighted genes, kept in column order
            gkeep = np.sort(np.argsort(-W)[:n_genes])

        if preprocessing == 'Normalizer':
            Ds = D[:, gkeep].toarray()
            Ds = Normalizer().fit_transform(Ds)

        elif preprocessing == 'StandardScaler':
            Ds = D[:, gkeep].toarray()
            Ds = StandardScaler(with_mean=True).fit_transform(Ds)
            # clip extreme z-scores
            Ds[Ds > 5] = 5
            Ds[Ds < -5] = -5

        else:
            Ds = D[:, gkeep].toarray()

        # weight each gene's expression by its SAM weight prior to PCA
        D_sub = Ds * (W[gkeep])

        if numcells > 500:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='auto')
        else:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='full')

        if self.distance == 'euclidean':
            # row-normalize the PCA coordinates when using euclidean distance
            g_weighted = Normalizer().fit_transform(g_weighted)

        self.adata.uns['pca_obj'] = pca
        EDM = self.calc_nnm(g_weighted)

        W = self.dispersion_ranking_NN(
            EDM, num_norm_avg=num_norm_avg)

        self.adata.uns['X_processed'] = D_sub

        return W, g_weighted, EDM
def calc_nnm(self,g_weighted):
numcells=g_weighted.shape[0]
if g_weighted.shape[0] > 8000:
nnm, dists = ut.nearest_neighbors(
g_weighted, n_neighbors=self.k, metric=self.distance)
EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
EDM[np.tile(np.arange(nnm.shape[0])[:, None],
(1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
EDM = EDM.tocsr()
else:
dist = ut.compute_distances(g_weighted, self.distance)
nnm = ut.dist_to_nn(dist, self.k)
EDM = sp.csr_matrix(nnm)
return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
"""Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
geneID_groups = self.adata.uns['gene_groups']
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs)
def corr_bin_genes(self, number_of_features=None, input_gene=None):
"""A (hacky) method for binning groups of genes correlated along the
SAM manifold.
Parameters
----------
number_of_features - int, optional, default None
The number of genes to bin. Capped at 5000 due to memory
considerations.
input_gene - str, optional, default None
If not None, use this gene as the first seed when growing the
correlation bins.
"""
weights = self.adata.var['spatial_dispersions'].values
all_gene_names = np.array(list(self.adata.var_names))
D_avg = self.adata.layers['X_knn_avg']
idx2 = np.argsort(-weights)[:weights[weights > 0].size]
if(number_of_features is None or number_of_features > idx2.size):
number_of_features = idx2.size
if number_of_features > 1000:
number_of_features = 1000
if(input_gene is not None):
input_gene = np.where(all_gene_names == input_gene)[0]
if(input_gene.size == 0):
print(
"Gene note found in the filtered dataset. Note "
"that genes are case sensitive.")
return
seeds = [np.array([input_gene])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(all_gene_names[seeds[i]])
return geneID_groups[0]
else:
seeds = [np.array([idx2[0]])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(
all_gene_names[seeds[i]])
self.adata.uns['gene_groups'] = geneID_groups
return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
"""Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' is set to 'precomputed' by default,
implying that this function expects a distance matrix by default.
"""
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d
def run_umap(self, X=None, metric=None, **kwargs):
"""Wrapper for umap-learn.
See https://github.com/lmcinnes/umap sklearn for the documentation
and source code.
"""
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d
    def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
                      method='gauss', **kwargs):
        """
        Experimental -- running UMAP on the diffusion components
        """
        # NOTE(review): the 'metric' parameter is never referenced below; the
        # first neighbor search uses self.distance and the second hard-codes
        # 'euclidean' -- confirm the intent.
        import scanpy.api as sc
        # build a neighbor graph in the chosen representation (PCA space)
        sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
                     metric=self.distance,method=method)
        sc.tl.diffmap(self.adata, n_comps=n_comps)
        # rebuild the graph in diffusion-component space
        sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
                     metric='euclidean',method=method)
        # preserve any existing SAM UMAP before scanpy overwrites 'X_umap'
        if 'X_umap' in self.adata.obsm.keys():
            self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']

        sc.tl.umap(self.adata,min_dist=0.1,copy=False)
def knn_avg(self, nnm=None):
if (nnm is None):
nnm = self.adata.uns['neighbors']['connectivities']
D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])
self.adata.layers['X_knn_avg'] = D_avg
    def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
                edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
        """Display a scatter plot.

        Displays a scatter plot using the SAM projection or another input
        projection with or without annotations.

        Parameters
        ----------
        projection - ndarray of floats, optional, default None
            An N x 2 matrix, where N is the number of data points. If None,
            use an existing SAM projection (default t-SNE). Can take on values
            'umap' or 'tsne' to specify either the SAM UMAP embedding or
            SAM t-SNE embedding.

        c - ndarray or str, optional, default None
            Colors for each cell in the scatter plot. Can be a vector of
            floats or strings for cell annotations. Can also be a key
            for sam.adata.obs (i.e. 'louvain_clusters').

        axes - matplotlib axis, optional, default None
            Plot output to the specified, existing axes. If None, create new
            figure window.

        cmap - string, optional, default 'rainbow'
            The colormap to use for the input color values.

        colorbar - bool, optional default True
            If True, display a colorbar indicating which values / annotations
            correspond to which color in the scatter plot.

        Keyword arguments -
            All other keyword arguments that can be passed into
            matplotlib.pyplot.scatter can be used.
        """
        if (not PLOTTING):
            print("matplotlib not installed!")
        else:
            # resolve the 2-D coordinates to plot
            if(isinstance(projection, str)):
                try:
                    dt = self.adata.obsm[projection]
                except KeyError:
                    print('Please create a projection first using run_umap or'
                          'run_tsne')
            elif(projection is None):
                # fall back from UMAP to t-SNE if only one exists
                try:
                    dt = self.adata.obsm['X_umap']
                except KeyError:
                    try:
                        dt = self.adata.obsm['X_tsne']
                    except KeyError:
                        print("Please create either a t-SNE or UMAP projection"
                              "first.")
                        return
            else:
                dt = projection

            if(axes is None):
                plt.figure()
                axes = plt.gca()

            if(c is None):
                plt.scatter(dt[:, 0], dt[:, 1], s=s,
                            linewidth=linewidth, edgecolor=edgecolor, **kwargs)
            else:
                # a string 'c' may name a column of adata.obs
                if isinstance(c, str):
                    try:
                        c = self.adata.obs[c].get_values()
                    except KeyError:
                        0  # do nothing

                if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
                   (isinstance(c, np.ndarray) or isinstance(c, list))):
                    # categorical labels: convert to integer codes for coloring
                    i = ut.convert_annotations(c)
                    ui, ai = np.unique(i, return_index=True)

                    cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)

                    if(colorbar):
                        # label colorbar ticks with the original annotations
                        cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                        cbar.ax.set_yticklabels(c[ai])
                else:
                    # numeric colors; suppress the colorbar for scalars
                    if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                        colorbar = False
                    i = c

                    cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)

                    if(colorbar):
                        plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name)
    def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
        """Cluster cells with DBSCAN, then assign each noise point to the
        cluster most represented among its nearest non-noise neighbors.

        Parameters
        ----------
        X - numpy.ndarray, optional, default None
            Coordinates to cluster. If None, the UMAP embedding is used and
            the labels are stored in 'adata.obs["density_clusters"]';
            otherwise the labels are returned.
        eps - float, optional, default 1
            DBSCAN neighborhood radius.
        metric - str, optional, default 'euclidean'
            Distance metric passed to DBSCAN.
        """
        from sklearn.cluster import DBSCAN
        if X is None:
            X = self.adata.obsm['X_umap']
            save = True
        else:
            save = False

        cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)
        # DBSCAN labels noise points -1; reassign them below
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            # distances from clustered points to noise points
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            # vote: count each noise point's neighbors per cluster
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

            cl[idx1] = np.argmax(nnmc, axis=1)

        if save:
            self.adata.obs['density_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def louvain_clustering(self, X=None, res=1, method='modularity'):
        """Runs Louvain clustering using the vtraag implementation. Assumes
        that 'louvain' optional dependency is installed.

        Parameters
        ----------
        X - scipy.sparse matrix, optional, default None
            Adjacency matrix to cluster on. If None, the SAM neighbor graph
            is used and labels are stored in 'adata.obs["louvain_clusters"]';
            otherwise the labels are returned.

        res - float, optional, default 1
            The resolution parameter which tunes the number of clusters Louvain
            finds.

        method - str, optional, default 'modularity'
            Can be 'modularity' or 'significance', which are two different
            optimizing functions in the Louvain algorithm.
        """
        if X is None:
            X = self.adata.uns['neighbors']['connectivities']
            save = True
        else:
            if not sp.isspmatrix_csr(X):
                X = sp.csr_matrix(X)
            save = False

        import igraph as ig
        import louvain

        # X X^T scores pairs by shared neighbors; keep the top k per row
        adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
        sources, targets = adjacency.nonzero()
        weights = adjacency[sources, targets]
        if isinstance(weights, np.matrix):
            weights = weights.A1
        g = ig.Graph(directed=True)
        g.add_vertices(adjacency.shape[0])
        g.add_edges(list(zip(sources, targets)))
        try:
            g.es['weight'] = weights
        except BaseException:
            # best-effort: fall back to an unweighted graph
            pass

        if method == 'significance':
            cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
        else:
            cl = louvain.find_partition(
                g,
                louvain.RBConfigurationVertexPartition,
                resolution_parameter=res)

        if save:
            self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
        else:
            return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
"""Performs k-means clustering.
Parameters
----------
numc - int
Number of clusters
npcs - int, optional, default 15
Number of principal components to use as inpute for k-means
clustering.
"""
from sklearn.cluster import KMeans
if X is None:
D_sub = self.adata.uns['X_processed']
X = (
D_sub -
D_sub.mean(0)).dot(
self.adata.uns['pca_obj'].components_[
:npcs,
:].T)
save = True
else:
save = False
cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
if save:
self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
else:
return cl
    def leiden_clustering(self, X=None, res = 1):
        """Runs Leiden clustering via scanpy.

        Parameters
        ----------
        X - scipy.sparse matrix, optional, default None
            Adjacency matrix to cluster on. If None, scanpy's stored
            neighbor graph is used and the labels go to
            'adata.obs["leiden_clusters"]'; otherwise they go to
            'adata.obs["leiden_clusters_X"]'.
        res - float, optional, default 1
            Resolution parameter tuning the number of clusters.
        """
        import scanpy.api as sc
        if X is None:
            sc.tl.leiden(self.adata, resolution = res,
                         key_added='leiden_clusters')
            # re-store the labels as integer categoricals
            self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
                    'leiden_clusters'].get_values().astype('int'))
        else:
            sc.tl.leiden(self.adata, resolution = res, adjacency = X,
                         key_added='leiden_clusters_X')
            self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
                    'leiden_clusters_X'].get_values().astype('int'))
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    """Clusters cells with HDBSCAN, then assigns HDBSCAN outliers to
    clusters by majority vote among their k nearest clustered neighbors.

    Parameters
    ----------
    X - array-like, optional, default None
        Data to cluster. If None, uses a row-normalized 15-PC projection of
        the processed expression data and writes the labels to
        ``adata.obs['hdbknn_clusters']``; otherwise the labels are returned.
    k - int, optional, default None
        Number of nearest neighbors used when assigning outlier cells to
        clusters. Defaults to 20.
    **kwargs -
        Keyword arguments forwarded to hdbscan.HDBSCAN.
    """
    import hdbscan
    if X is None:
        #X = self.adata.obsm['X_pca']
        D = self.adata.uns['X_processed']
        # Project onto the top 15 principal axes, then L2-normalize rows.
        X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False
    if k is None:
        k = 20#self.k
    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
    cl = hdb.fit_predict(X)
    # HDBSCAN labels outliers as -1: idx0 = clustered cells, idx1 = outliers.
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        # Distance map from clustered cells (rows) to outliers (columns).
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        # For each outlier, indices of its k nearest clustered cells.
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        # Binary outlier-by-clustered-cell neighbor indicator matrix.
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # Count neighbor votes per cluster and assign the majority label.
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
        cl[idx1] = np.argmax(nnmc, axis=1)
    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
                             n_genes=4000):
    """
    Ranks marker genes for each cluster using a random forest
    classification approach.
    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    clusters - int or array-like, default None
        A number or vector corresponding to the specific cluster ID(s)
        for which marker genes will be calculated. If None, marker genes
        will be computed for all clusters.
    n_genes - int, optional, default 4000
        By default, trains the classifier on the top 4000 SAM-weighted
        genes.
    Returns
    -------
    markers - dict
        Maps each cluster ID to its genes ranked by random-forest feature
        importance (descending).
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels
    from sklearn.ensemble import RandomForestClassifier
    markers = {}
    # BUG FIX: was 'clusters == None', which is an elementwise comparison
    # (ambiguous truth value) when 'clusters' is an array; identity check
    # is correct and matches the 'clusters is None' test below.
    if clusters is None:
        lblsu = np.unique(lbls)
    else:
        lblsu = np.unique(clusters)
    # Train only on the top n_genes SAM-weighted genes; column order
    # matches adata.uns['ranked_genes'].
    indices = np.argsort(-self.adata.var['weights'].values)
    X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
    for K in range(lblsu.size):
        print(K)
        # One-vs-rest binary target for the current cluster.
        y = np.zeros(lbls.size)
        y[lbls == lblsu[K]] = 1
        clf = RandomForestClassifier(n_estimators=100, max_depth=None,
                                     random_state=0)
        clf.fit(X, y)
        idx = np.argsort(-clf.feature_importances_)
        markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
    if clusters is None:
        self.adata.uns['marker_genes_rf'] = markers
    return markers
def identify_marker_genes_ratio(self, labels=None):
    """
    Ranks marker genes for each cluster using a SAM-weighted
    expression-ratio approach (works quite well).
    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    Returns
    -------
    markers - dict
        Maps each cluster ID to its genes ranked by the weighted
        expression-ratio score (descending).
    """
    if labels is None:
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels
    gene_names = np.array(list(self.adata.var_names))
    X_disp = self.adata.layers['X_disp']
    gene_weights = self.adata.var['weights'].values
    # Total expression per gene across all cells.
    totals = np.array(X_disp.sum(0)).flatten()
    expressed = totals > 0
    markers = {}
    for lbl in np.unique(lbls):
        # Total expression per gene within the current cluster.
        in_cluster = np.array(X_disp[lbls == lbl, :].sum(0)).flatten()
        score = np.zeros(in_cluster.size)
        # Weighted squared-expression ratio; zero-total genes stay at 0.
        score[expressed] = (in_cluster[expressed] ** 2 / totals[expressed]
                            * gene_weights[expressed])
        markers[lbl] = gene_names[np.argsort(-score)]
    self.adata.uns['marker_genes_ratio'] = markers
    return markers
def identify_marker_genes_corr(self, labels=None, n_genes=4000):
    """
    Ranking marker genes based on their respective magnitudes in the
    correlation dot products with cluster-specific reference expression
    profiles.
    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    n_genes - int, optional, default 4000
        By default, computes correlations on the top 4000 SAM-weighted genes.
    Returns
    -------
    markers - dict
        Maps each cluster ID to its genes ranked by correlation with the
        cluster's mean expression profile (descending).
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels
    w=self.adata.var['weights'].values
    s = StandardScaler()
    # Restrict to the top n_genes SAM-weighted genes; standardize each
    # gene across cells and rescale by its SAM weight.
    idxg = np.argsort(-w)[:n_genes]
    y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
    all_gene_names = np.array(list(self.adata.var_names))[idxg]
    markers = {}
    lblsu=np.unique(lbls)
    for i in lblsu:
        Gcells = np.array(list(self.adata.obs_names[lbls==i]))
        # Rows of y1 belonging to the current cluster.
        z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
        # z-score each cell's profile across genes.
        m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
        # z-scored cluster-mean reference profile.
        ref = z1.mean(0)
        ref = (ref-ref.mean())/ref.std()
        # Per-gene contribution to the correlation with the reference.
        g2 = (m1*ref).mean(0)
        markers[i] = all_gene_names[np.argsort(-g2)]
    self.adata.uns['marker_genes_corr'] = markers
    return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.corr_bin_genes | python | def corr_bin_genes(self, number_of_features=None, input_gene=None):
weights = self.adata.var['spatial_dispersions'].values
all_gene_names = np.array(list(self.adata.var_names))
D_avg = self.adata.layers['X_knn_avg']
idx2 = np.argsort(-weights)[:weights[weights > 0].size]
if(number_of_features is None or number_of_features > idx2.size):
number_of_features = idx2.size
if number_of_features > 1000:
number_of_features = 1000
if(input_gene is not None):
input_gene = np.where(all_gene_names == input_gene)[0]
if(input_gene.size == 0):
print(
"Gene note found in the filtered dataset. Note "
"that genes are case sensitive.")
return
seeds = [np.array([input_gene])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(all_gene_names[seeds[i]])
return geneID_groups[0]
else:
seeds = [np.array([idx2[0]])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(
all_gene_names[seeds[i]])
self.adata.uns['gene_groups'] = geneID_groups
return geneID_groups | A (hacky) method for binning groups of genes correlated along the
SAM manifold.
Parameters
----------
number_of_features - int, optional, default None
The number of genes to bin. Capped at 5000 due to memory
considerations.
input_gene - str, optional, default None
If not None, use this gene as the first seed when growing the
correlation bins. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L926-L1009 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
    # Accept preloaded data in three forms: a (matrix, gene IDs, cell IDs)
    # tuple/list, a cells-x-genes DataFrame, or an existing AnnData object.
    if isinstance(counts, tuple) or isinstance(counts, list):
        raw_data, all_gene_names, all_cell_names = counts
        if isinstance(raw_data, np.ndarray):
            # Store dense input as sparse CSR to save memory.
            raw_data = sp.csr_matrix(raw_data)
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, pd.DataFrame):
        raw_data = sp.csr_matrix(counts.values)
        all_gene_names = np.array(list(counts.columns.values))
        all_cell_names = np.array(list(counts.index.values))
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, AnnData):
        all_cell_names=np.array(list(counts.obs_names))
        all_gene_names=np.array(list(counts.var_names))
        self.adata_raw = counts
    elif counts is not None:
        raise Exception(
            "\'counts\' must be either a tuple/list of "
            "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
            "cells x genes")
    if(annotations is not None):
        annotations = np.array(list(annotations))
        if counts is not None:
            self.adata_raw.obs['annotations'] = pd.Categorical(annotations)
    if(counts is not None):
        # De-duplicate IDs, then keep a working copy of the raw data;
        # 'X_disp' holds the expression used for display/marker analysis.
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                    include_genes=None, exclude_genes=None,
                    include_cells=None, exclude_cells=None,
                    norm='log', min_expression=1, thresh=0.01,
                    filter_genes=True):
    """Log-normalizes and filters the expression data.
    Parameters
    ----------
    div : float, optional, default 1
        The factor by which the gene expression will be divided prior to
        log normalization.
    downsample : float, optional, default 0
        The factor by which to randomly downsample the data. If 0, the
        data will not be downsampled.
    sum_norm : str or float, optional, default None
        If a float, the total number of transcripts in each cell will be
        normalized to this value prior to normalization and filtering.
        Otherwise, nothing happens. If 'cell_median', each cell is
        normalized to have the median total read count per cell. If
        'gene_median', each gene is normalized to have the median total
        read count per gene.
    norm : str, optional, default 'log'
        If 'log', log-normalizes the expression data. If 'ftt', applies the
        Freeman-Tukey variance-stabilization transformation. If
        'multinomial', applies the Pearson-residual transformation (this is
        experimental and should only be used for raw, un-normalized UMI
        datasets). If None, the data is not normalized.
    include_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to keep.
        All other genes will be filtered out. Gene names are case-
        sensitive.
    exclude_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to
        exclude. These genes will be filtered out. Gene names are case-
        sensitive.
    include_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to keep.
        All other cells will be filtered out. Cell names are
        case-sensitive.
    exclude_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to exclude.
        These cells will be filtered out. Cell names are
        case-sensitive.
    min_expression : float, optional, default 1
        The threshold above which a gene is considered
        expressed. Gene expression values less than 'min_expression' are
        set to zero.
    thresh : float, optional, default 0.01
        Keep genes expressed in greater than 'thresh'*100 % of cells and
        less than (1-'thresh')*100 % of cells, where a gene is considered
        expressed if its expression value exceeds 'min_expression'.
    filter_genes : bool, optional, default True
        Setting this to False turns off filtering operations aside from
        removing genes with zero expression across all cells. Genes passed
        in exclude_genes or not passed in include_genes will still be
        filtered.
    """
    # load data
    try:
        D= self.adata_raw.X
        self.adata = self.adata_raw.copy()
    except AttributeError:
        print('No data loaded')
    # filter cells
    cell_names = np.array(list(self.adata_raw.obs_names))
    idx_cells = np.arange(D.shape[0])
    if(include_cells is not None):
        include_cells = np.array(list(include_cells))
        idx2 = np.where(np.in1d(cell_names, include_cells))[0]
        idx_cells = np.array(list(set(idx2) & set(idx_cells)))
    if(exclude_cells is not None):
        exclude_cells = np.array(list(exclude_cells))
        idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                invert=True))[0]
        idx_cells = np.array(list(set(idx4) & set(idx_cells)))
    if downsample > 0:
        # Randomly keep 1/downsample of the cells.
        numcells = int(D.shape[0] / downsample)
        rand_ind = np.random.choice(np.arange(D.shape[0]),
                                    size=numcells, replace=False)
        idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
    else:
        numcells = D.shape[0]
    mask_cells = np.zeros(D.shape[0], dtype='bool')
    mask_cells[idx_cells] = True
    self.adata = self.adata_raw[mask_cells,:].copy()
    D = self.adata.X
    # Work on a float32 CSR matrix from here on.
    if isinstance(D,np.ndarray):
        D=sp.csr_matrix(D,dtype='float32')
    else:
        D=D.astype('float32')
        D.sort_indices()
    if(D.getformat() == 'csc'):
        D=D.tocsr();
    # sum-normalize
    if (sum_norm == 'cell_median' and norm != 'multinomial'):
        s = D.sum(1).A.flatten()
        sum_norm = np.median(s)
        D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
    elif (sum_norm == 'gene_median' and norm != 'multinomial'):
        s = D.sum(0).A.flatten()
        sum_norm = np.median(s)
        s[s==0]=1
        D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
    elif sum_norm is not None and norm != 'multinomial':
        D = D.multiply(1 / D.sum(1).A.flatten()[:,
                       None] * sum_norm).tocsr()
    # normalize
    self.adata.X = D
    if norm is None:
        D.data[:] = (D.data / div)
    elif(norm.lower() == 'log'):
        D.data[:] = np.log2(D.data / div + 1)
    elif(norm.lower() == 'ftt'):
        # Freeman-Tukey: sqrt(x) + sqrt(x + 1).
        D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
    elif norm.lower() == 'multinomial':
        # Pearson residuals under a multinomial model (experimental).
        ni = D.sum(1).A.flatten() #cells
        pj = (D.sum(0) / D.sum()).A.flatten() #genes
        col = D.indices
        row=[]
        for i in range(D.shape[0]):
            row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
        row = np.concatenate(row).astype('int32')
        # Expected counts mu_ij = n_i * p_j at the nonzero positions.
        mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
        mu2 = mu.copy()
        mu2.data[:]=mu2.data**2
        mu2 = mu2.multiply(1/ni[:,None])
        mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
        self.adata.X = mu
        if sum_norm is None:
            sum_norm = np.median(ni)
        # D becomes the log-normalized display matrix in this mode.
        D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
        D.data[:] = np.log2(D.data / div + 1)
    else:
        D.data[:] = (D.data / div)
    # zero-out low-expressed genes
    idx = np.where(D.data <= min_expression)[0]
    D.data[idx] = 0
    # filter genes
    gene_names = np.array(list(self.adata.var_names))
    idx_genes = np.arange(D.shape[1])
    if(include_genes is not None):
        include_genes = np.array(list(include_genes))
        idx = np.where(np.in1d(gene_names, include_genes))[0]
        idx_genes = np.array(list(set(idx) & set(idx_genes)))
    if(exclude_genes is not None):
        exclude_genes = np.array(list(exclude_genes))
        idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                invert=True))[0]
        idx_genes = np.array(list(set(idx3) & set(idx_genes)))
    if(filter_genes):
        # Count cells expressing each gene from the CSR index array.
        a, ct = np.unique(D.indices, return_counts=True)
        c = np.zeros(D.shape[1])
        c[a] = ct
        keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                       c / D.shape[0] <= 1 - thresh))[0]
        idx_genes = np.array(list(set(keep) & set(idx_genes)))
    # Zero out filtered genes rather than dropping columns, so indices
    # stay aligned with the raw data.
    mask_genes = np.zeros(D.shape[1], dtype='bool')
    mask_genes[idx_genes] = True
    self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
    self.adata.X.eliminate_zeros()
    self.adata.var['mask_genes']=mask_genes
    if norm == 'multinomial':
        self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
        self.adata.layers['X_disp'].eliminate_zeros()
    else:
        self.adata.layers['X_disp'] = self.adata.X
def load_data(self, filename, transpose=True,
              save_sparse_file='h5ad', sep=',', **kwargs):
    """Loads the specified data file. The file can be a table of
    read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
    as columns by default. The file can also be a pickle file (output from
    'save_sparse_data') or an h5ad file (output from 'save_anndata').
    This function that loads the file specified by 'filename'.
    Parameters
    ----------
    filename - string
        The path to the tabular raw expression counts file.
    sep - string, optional, default ','
        The delimeter used to read the input data table. By default
        assumes the input table is delimited by commas.
    save_sparse_file - str, optional, default 'h5ad'
        If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
        (the native AnnData file format) to the same folder as the original
        data for faster loading in the future. If 'p', pickles the sparse
        data structure, cell names, and gene names in the same folder as
        the original data for faster loading in the future.
    transpose - bool, optional, default True
        By default, assumes file is (genes x cells). Set this to False if
        the file has dimensions (cells x genes).
    """
    if filename.split('.')[-1] == 'p':
        # Pickled (data, cell names, gene names) tuple from
        # 'save_sparse_data'; stored genes-by-cells.
        raw_data, all_cell_names, all_gene_names = (
            pickle.load(open(filename, 'rb')))
        if(transpose):
            raw_data = raw_data.T
            if raw_data.getformat()=='csc':
                print("Converting sparse matrix to csr format...")
                raw_data=raw_data.tocsr()
        # Already a fast-loading format; do not re-save.
        save_sparse_file = None
    elif filename.split('.')[-1] != 'h5ad':
        # Plain text table (csv/txt).
        df = pd.read_csv(filename, sep=sep, index_col=0)
        if(transpose):
            dataset = df.T
        else:
            dataset = df
        raw_data = sp.csr_matrix(dataset.values)
        all_cell_names = np.array(list(dataset.index.values))
        all_gene_names = np.array(list(dataset.columns.values))
    if filename.split('.')[-1] != 'h5ad':
        self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                 var={'var_names': all_gene_names})
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = raw_data
    else:
        # Native AnnData file: load directly.
        self.adata_raw = anndata.read_h5ad(filename, **kwargs)
        self.adata = self.adata_raw.copy()
        if 'X_disp' not in list(self.adata.layers.keys()):
            self.adata.layers['X_disp'] = self.adata.X
        save_sparse_file = None
    # Optionally cache a fast-loading copy next to the original file.
    if(save_sparse_file == 'p'):
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_sparse_data(path + new_sparse_file + '_sparse.p')
    elif(save_sparse_file == 'h5ad'):
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
def load_annotations(self, aname, sep=','):
    """Loads cell annotations.
    Loads the cell annoations specified by the 'aname' path.
    Parameters
    ----------
    aname - string
        The path to the annotations file. First column should be cell IDs
        and second column should be the desired annotations.
    sep - string, optional, default ','
        Field delimiter used when parsing the annotations file.
    """
    ann = pd.read_csv(aname)
    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))
    # Re-parse with different index/header settings until the row count
    # matches the number of cells in the raw data.
    if(ann.shape[1] > 1):
        ann = pd.read_csv(aname, index_col=0, sep=sep)
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
    else:
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, header=None, sep=sep)
    # Normalize index to fixed-width unicode strings for lookup by name.
    ann.index = np.array(list(ann.index.astype('<U100')))
    # ann1: annotations for the (possibly filtered) working cells;
    # ann2: annotations for all raw cells.
    ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
    ann2 = np.array(list(ann.values.flatten()))
    self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
    self.adata.obs['annotations'] = pd.Categorical(ann1)
def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
    """Computes the spatial dispersion factors for each gene.
    Parameters
    ----------
    nnm - scipy.sparse, float
        Square cell-to-cell nearest-neighbor matrix.
    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This ensures
        that outlier genes do not significantly skew the weight
        distribution.
    Returns:
    -------
    weights - ndarray, float
        The vector of gene weights.
    """
    # Average each cell's expression over its nearest neighbors; result is
    # stored in adata.layers['X_knn_avg'].
    self.knn_avg(nnm)
    D_avg = self.adata.layers['X_knn_avg']
    # Fano factor (variance / mean) of the smoothed expression per gene.
    mu, var = sf.mean_variance_axis(D_avg, axis=0)
    dispersions = np.zeros(var.size)
    dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
    self.adata.var['spatial_dispersions'] = dispersions.copy()
    # Cap dispersions at the mean of the top 'num_norm_avg' values so a
    # few extreme genes do not dominate the weights.
    ma = np.sort(dispersions)[-num_norm_avg:].mean()
    dispersions[dispersions >= ma] = ma
    weights = ((dispersions / dispersions.max())**0.5).flatten()
    self.adata.var['weights'] = weights
    return weights
def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
    """Computes the contribution of the gene IDs in 'genes' to each
    principal component (PC) of the filtered expression data as the mean of
    the absolute value of the corresponding gene loadings. High values
    correspond to PCs that are highly correlated with the features in
    'genes'. These PCs can then be regressed out of the data using
    'regress_genes'.
    Parameters
    ----------
    genes - numpy.array or list
        Genes for which contribution to each PC will be calculated.
    npcs - int, optional, default None
        How many PCs to calculate when computing PCA of the filtered and
        log-transformed expression data. If None, calculate all PCs.
    plot - bool, optional, default False
        If True, plot the scores reflecting how correlated each PC is with
        genes of interest. Otherwise, do not plot anything.
    Returns:
    -------
    x - numpy.array
        Scores reflecting how correlated each PC is with the genes of
        interest (ordered by decreasing eigenvalues).
    """
    from sklearn.decomposition import PCA
    if npcs is None:
        npcs = self.adata.X.shape[0]
    pca = PCA(n_components=npcs)
    pc = pca.fit_transform(self.adata.X.toarray())
    # Cache the PCA model and scores for later use by 'regress_genes'.
    self.regression_pca = pca
    self.regression_pcs = pc
    gene_names = np.array(list(self.adata.var_names))
    if(genes is not None):
        idx = np.where(np.in1d(gene_names, genes))[0]
        # Mean absolute loading of the genes of interest on each PC.
        sx = pca.components_[:, idx]
        x = np.abs(sx).mean(1)
        if plot:
            plt.figure()
            plt.plot(x)
        return x
    else:
        return
def regress_genes(self, PCs):
    """Regress out the principal components in 'PCs' from the filtered
    expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
    been previously called.
    Parameters
    ----------
    PCs - int, numpy.array, list
        The principal components to regress out of the expression data.
    """
    # Accept a scalar, list, or array of PC indices uniformly.
    ind = [PCs]
    ind = np.array(ind).flatten()
    try:
        # Remove the selected PCs' contribution, scaled by the SAM gene
        # weights when they are available.
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :] * self.adata.var[
                'weights'].values)
    except BaseException:
        # NOTE(review): broad catch -- presumably hit when 'weights' has
        # not been computed yet; falls back to the unweighted components.
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :])
    self.adata.X = sp.csr_matrix(y)
def run(self,
        max_iter=10,
        verbose=True,
        projection='umap',
        stopping_condition=5e-3,
        num_norm_avg=50,
        k=20,
        distance='correlation',
        preprocessing='Normalizer',
        proj_kwargs={}):
    """Runs the Self-Assembling Manifold algorithm.
    Parameters
    ----------
    k - int, optional, default 20
        The number of nearest neighbors to identify for each cell.
    distance : string, optional, default 'correlation'
        The distance metric to use when constructing cell distance
        matrices. Can be any of the distance metrics supported by
        sklearn's 'pdist'.
    max_iter - int, optional, default 10
        The maximum number of iterations SAM will run.
    stopping_condition - float, optional, default 5e-3
        The stopping condition threshold for the RMSE between gene weights
        in adjacent iterations.
    verbose - bool, optional, default True
        If True, the iteration number and error between gene weights in
        adjacent iterations will be displayed.
    projection - str, optional, default 'umap'
        If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
        embedding. Otherwise, no embedding will be generated.
    preprocessing - str, optional, default 'Normalizer'
        If 'Normalizer', use sklearn.preprocessing.Normalizer, which
        normalizes expression data prior to PCA such that each cell has
        unit L2 norm. If 'StandardScaler', use
        sklearn.preprocessing.StandardScaler, which normalizes expression
        data prior to PCA such that each gene has zero mean and unit
        variance. Otherwise, do not normalize the expression data. We
        recommend using 'StandardScaler' for large datasets and
        'Normalizer' otherwise.
    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This prevents
        genes with large spatial dispersions from skewing the distribution
        of weights.
    proj_kwargs - dict, optional, default {}
        A dictionary of keyword arguments to pass to the projection
        functions.
    """
    self.distance = distance
    D = self.adata.X
    self.k = k
    # Clamp k to [5, 100] and below the number of cells.
    if(self.k < 5):
        self.k = 5
    elif(self.k > 100):
        self.k = 100
    if(self.k > D.shape[0] - 1):
        self.k = D.shape[0] - 2
    numcells = D.shape[0]
    # Heuristic: use fewer genes and PCs as the dataset grows.
    n_genes = 8000
    if numcells > 3000 and n_genes > 3000:
        n_genes = 3000
    elif numcells > 2000 and n_genes > 4500:
        n_genes = 4500
    elif numcells > 1000 and n_genes > 6000:
        n_genes = 6000
    elif n_genes > 8000:
        n_genes = 8000
    npcs = None
    if npcs is None and numcells > 3000:
        npcs = 150
    elif npcs is None and numcells > 2000:
        npcs = 250
    elif npcs is None and numcells > 1000:
        npcs = 350
    elif npcs is None:
        npcs = 500
    tinit = time.time()
    # Seed with a random kNN graph: each cell linked to itself plus
    # k-1 random cells.
    edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
    nums = np.arange(edm.shape[1])
    RINDS = np.random.randint(
        0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                       (self.k - 1)))
    RINDS = np.hstack((nums[:, None], RINDS))
    edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
    edm = edm.tocsr()
    print('RUNNING SAM')
    # Initial gene weights from the random graph.
    W = self.dispersion_ranking_NN(
        edm, num_norm_avg=1)
    old = np.zeros(W.size)
    new = W
    i = 0
    err = ((new - old)**2).mean()**0.5
    if max_iter < 5:
        max_iter = 5
    nnas = num_norm_avg
    # Keep the graph and weight history from every iteration.
    self.Ns=[edm]
    self.Ws = [W]
    # Iterate until the gene weights converge (RMSE below threshold).
    while (i < max_iter and err > stopping_condition):
        conv = err
        if(verbose):
            print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))
        i += 1
        old = new
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        new = W
        err = ((new - old)**2).mean()**0.5
        self.Ns.append(EDM)
        self.Ws.append(W)
    # One final pass with the converged weights.
    W, wPCA_data, EDM, = self.calculate_nnm(
        D, W, n_genes, preprocessing, npcs, numcells, nnas)
    self.Ns.append(EDM)
    all_gene_names = np.array(list(self.adata.var_names))
    indices = np.argsort(-W)
    ranked_genes = all_gene_names[indices]
    self.corr_bin_genes(number_of_features=1000)
    self.adata.uns['ranked_genes'] = ranked_genes
    self.adata.obsm['X_pca'] = wPCA_data
    self.adata.uns['neighbors'] = {}
    self.adata.uns['neighbors']['connectivities'] = EDM
    if(projection == 'tsne'):
        print('Computing the t-SNE embedding...')
        self.run_tsne(**proj_kwargs)
    elif(projection == 'umap'):
        print('Computing the UMAP embedding...')
        self.run_umap(**proj_kwargs)
    elif(projection == 'diff_umap'):
        print('Computing the diffusion UMAP embedding...')
        self.run_diff_umap(**proj_kwargs)
    elapsed = time.time() - tinit
    if verbose:
        print('Elapsed time: ' + str(elapsed) + ' seconds')
def calculate_nnm(
        self,
        D,
        W,
        n_genes,
        preprocessing,
        npcs,
        numcells,
        num_norm_avg):
    # One SAM iteration: select/scale genes by the current weights,
    # project with weighted PCA, build the kNN graph, and recompute the
    # gene weights from that graph.
    if(n_genes is None):
        gkeep = np.arange(W.size)
    else:
        # Keep the top n_genes by weight; sorted for stable column order.
        gkeep = np.sort(np.argsort(-W)[:n_genes])
    if preprocessing == 'Normalizer':
        # Unit L2 norm per cell.
        Ds = D[:, gkeep].toarray()
        Ds = Normalizer().fit_transform(Ds)
    elif preprocessing == 'StandardScaler':
        # Zero mean / unit variance per gene, with clipped z-scores to
        # limit outlier influence.
        Ds = D[:, gkeep].toarray()
        Ds = StandardScaler(with_mean=True).fit_transform(Ds)
        Ds[Ds > 5] = 5
        Ds[Ds < -5] = -5
    else:
        Ds = D[:, gkeep].toarray()
    # Rescale expression by the current gene weights.
    D_sub = Ds * (W[gkeep])
    if numcells > 500:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='auto')
    else:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='full')
    if self.distance == 'euclidean':
        g_weighted = Normalizer().fit_transform(g_weighted)
    self.adata.uns['pca_obj'] = pca
    EDM = self.calc_nnm(g_weighted)
    W = self.dispersion_ranking_NN(
        EDM, num_norm_avg=num_norm_avg)
    self.adata.uns['X_processed'] = D_sub
    return W, g_weighted, EDM
def calc_nnm(self, g_weighted):
    """Build the sparse k-nearest-neighbor adjacency matrix for the
    weighted PCA coordinates `g_weighted`.

    Datasets larger than 8000 cells use an approximate neighbor search;
    smaller ones use the full pairwise distance matrix.
    """
    n = g_weighted.shape[0]
    if n > 8000:
        # Approximate search yields an (n, k) neighbor-index array;
        # scatter ones into an n x n LIL matrix, then convert to CSR.
        nn_idx, _ = ut.nearest_neighbors(
            g_weighted, n_neighbors=self.k, metric=self.distance)
        adj = sp.coo_matrix((n, n), dtype='i').tolil()
        rows = np.repeat(np.arange(nn_idx.shape[0]), nn_idx.shape[1])
        adj[rows, nn_idx.flatten()] = 1
        adj = adj.tocsr()
    else:
        # Small data: exact pairwise distances, thresholded to k neighbors.
        pairwise = ut.compute_distances(g_weighted, self.distance)
        adj = sp.csr_matrix(ut.dist_to_nn(pairwise, self.k))
    return adj
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
"""Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
geneID_groups = self.adata.uns['gene_groups']
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs)
def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:]
def run_tsne(self, X=None, metric='correlation', **kwargs):
"""Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' is set to 'precomputed' by default,
implying that this function expects a distance matrix by default.
"""
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d
def run_umap(self, X=None, metric=None, **kwargs):
"""Wrapper for umap-learn.
See https://github.com/lmcinnes/umap sklearn for the documentation
and source code.
"""
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d
    def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
                      method='gauss', **kwargs):
        """
        Experimental -- running UMAP on the diffusion components

        Builds a kNN graph on `use_rep`, computes `n_comps` diffusion
        components, rebuilds the kNN graph in diffusion space, and runs
        scanpy's UMAP on it. Any existing SAM UMAP embedding is preserved
        in `adata.obsm['X_umap_sam']` before being overwritten.

        NOTE(review): the `metric` and `**kwargs` parameters are accepted
        but never used below -- the first neighbor graph uses self.distance
        and the second hard-codes 'euclidean'. Confirm whether intended.
        """
        import scanpy.api as sc
        # neighbor graph on the input representation (SAM distance metric)
        sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
                        metric=self.distance,method=method)
        sc.tl.diffmap(self.adata, n_comps=n_comps)
        # rebuild the graph in diffusion-component space
        sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
                        metric='euclidean',method=method)
        # keep the original SAM UMAP embedding before scanpy overwrites it
        if 'X_umap' in self.adata.obsm.keys():
            self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']

        sc.tl.umap(self.adata,min_dist=0.1,copy=False)
def knn_avg(self, nnm=None):
if (nnm is None):
nnm = self.adata.uns['neighbors']['connectivities']
D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])
self.adata.layers['X_knn_avg'] = D_avg
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
"""Display a scatter plot.
Displays a scatter plot using the SAM projection or another input
projection with or without annotations.
Parameters
----------
projection - ndarray of floats, optional, default None
An N x 2 matrix, where N is the number of data points. If None,
use an existing SAM projection (default t-SNE). Can take on values
'umap' or 'tsne' to specify either the SAM UMAP embedding or
SAM t-SNE embedding.
c - ndarray or str, optional, default None
Colors for each cell in the scatter plot. Can be a vector of
floats or strings for cell annotations. Can also be a key
for sam.adata.obs (i.e. 'louvain_clusters').
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
cmap - string, optional, default 'rainbow'
The colormap to use for the input color values.
colorbar - bool, optional default True
If True, display a colorbar indicating which values / annotations
correspond to which color in the scatter plot.
Keyword arguments -
All other keyword arguments that can be passed into
matplotlib.pyplot.scatter can be used.
"""
if (not PLOTTING):
print("matplotlib not installed!")
else:
if(isinstance(projection, str)):
try:
dt = self.adata.obsm[projection]
except KeyError:
print('Please create a projection first using run_umap or'
'run_tsne')
elif(projection is None):
try:
dt = self.adata.obsm['X_umap']
except KeyError:
try:
dt = self.adata.obsm['X_tsne']
except KeyError:
print("Please create either a t-SNE or UMAP projection"
"first.")
return
else:
dt = projection
if(axes is None):
plt.figure()
axes = plt.gca()
if(c is None):
plt.scatter(dt[:, 0], dt[:, 1], s=s,
linewidth=linewidth, edgecolor=edgecolor, **kwargs)
else:
if isinstance(c, str):
try:
c = self.adata.obs[c].get_values()
except KeyError:
0 # do nothing
if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
(isinstance(c, np.ndarray) or isinstance(c, list))):
i = ut.convert_annotations(c)
ui, ai = np.unique(i, return_index=True)
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
cbar = plt.colorbar(cax, ax=axes, ticks=ui)
cbar.ax.set_yticklabels(c[ai])
else:
if not (isinstance(c, np.ndarray) or isinstance(c, list)):
colorbar = False
i = c
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name)
    def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
        """Cluster cells with DBSCAN and reassign noise points.

        Parameters
        ----------
        X - ndarray, optional, default None
            Data to cluster. If None, uses the SAM UMAP embedding and stores
            the labels in `adata.obs['density_clusters']`; otherwise the
            labels are returned.

        eps - float, optional, default 1
            DBSCAN neighborhood radius.

        metric - str, optional, default 'euclidean'
            Distance metric passed to DBSCAN.

        **kwargs -
            Additional keyword arguments forwarded to sklearn's DBSCAN.
        """
        from sklearn.cluster import DBSCAN
        if X is None:
            X = self.adata.obsm['X_umap']
            save = True
        else:
            save = False

        cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)

        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            # DBSCAN labels outliers -1; reassign each outlier to the
            # cluster that dominates among its self.k nearest clustered cells
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            # vote counts: neighbors of each outlier per cluster
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

            cl[idx1] = np.argmax(nnmc, axis=1)

        if save:
            self.adata.obs['density_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def louvain_clustering(self, X=None, res=1, method='modularity'):
        """Runs Louvain clustering using the vtraag implementation. Assumes
        that 'louvain' optional dependency is installed.

        Parameters
        ----------
        X - scipy.sparse matrix, optional, default None
            Cell-to-cell connectivity matrix to cluster. If None, uses the
            stored neighbor connectivities and saves the labels in
            `adata.obs['louvain_clusters']`; otherwise labels are returned.

        res - float, optional, default 1
            The resolution parameter which tunes the number of clusters Louvain
            finds.

        method - str, optional, default 'modularity'
            Can be 'modularity' or 'significance', which are two different
            optimizing functions in the Louvain algorithm.
        """
        if X is None:
            X = self.adata.uns['neighbors']['connectivities']
            save = True
        else:
            if not sp.isspmatrix_csr(X):
                X = sp.csr_matrix(X)
            save = False

        import igraph as ig
        import louvain

        # symmetrize/densify the graph via X.X^T, then keep only each
        # node's top-k strongest edges
        adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
        sources, targets = adjacency.nonzero()
        weights = adjacency[sources, targets]
        if isinstance(weights, np.matrix):
            weights = weights.A1
        g = ig.Graph(directed=True)
        g.add_vertices(adjacency.shape[0])
        g.add_edges(list(zip(sources, targets)))
        try:
            g.es['weight'] = weights
        except BaseException:
            # fall back to an unweighted graph if edge weights can't be set
            pass

        if method == 'significance':
            cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
        else:
            cl = louvain.find_partition(
                g,
                louvain.RBConfigurationVertexPartition,
                resolution_parameter=res)

        if save:
            self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
        else:
            return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
"""Performs k-means clustering.
Parameters
----------
numc - int
Number of clusters
npcs - int, optional, default 15
Number of principal components to use as inpute for k-means
clustering.
"""
from sklearn.cluster import KMeans
if X is None:
D_sub = self.adata.uns['X_processed']
X = (
D_sub -
D_sub.mean(0)).dot(
self.adata.uns['pca_obj'].components_[
:npcs,
:].T)
save = True
else:
save = False
cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
if save:
self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
else:
return cl
def leiden_clustering(self, X=None, res = 1):
import scanpy.api as sc
if X is None:
sc.tl.leiden(self.adata, resolution = res,
key_added='leiden_clusters')
self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
'leiden_clusters'].get_values().astype('int'))
else:
sc.tl.leiden(self.adata, resolution = res, adjacency = X,
key_added='leiden_clusters_X')
self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
'leiden_clusters_X'].get_values().astype('int'))
    def hdbknn_clustering(self, X=None, k=None, **kwargs):
        """Cluster cells with HDBSCAN and reassign noise points via kNN.

        Parameters
        ----------
        X - ndarray, optional, default None
            Data to cluster. If None, projects `adata.uns['X_processed']`
            onto the top 15 SAM PCs, row-normalizes it, and stores the
            labels in `adata.obs['hdbknn_clusters']`; otherwise the labels
            are returned.

        k - int, optional, default None
            Number of nearest clustered cells used to reassign outliers.

        **kwargs -
            Additional keyword arguments forwarded to hdbscan.HDBSCAN.
        """
        import hdbscan
        if X is None:
            D = self.adata.uns['X_processed']
            X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
            X = Normalizer().fit_transform(X)
            save = True
        else:
            save = False

        if k is None:
            k = 20  # NOTE(review): hard-coded; self.k deliberately unused? confirm

        hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)

        cl = hdb.fit_predict(X)

        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            # HDBSCAN labels outliers -1; reassign each outlier to the
            # cluster that dominates among its k nearest clustered cells
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            # vote counts: neighbors of each outlier per cluster
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

            cl[idx1] = np.argmax(nnmc, axis=1)

        if save:
            self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
        else:
            return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
"""
Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers
    def identify_marker_genes_ratio(self, labels=None):
        """
        Ranks marker genes for each cluster using a SAM-weighted
        expression-ratio approach (works quite well).

        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.

        Returns
        -------
        markers - dict
            Maps each cluster ID to the gene names ranked by the weighted
            expression-ratio score. Also stored in
            `adata.uns['marker_genes_ratio']`.
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels

        all_gene_names = np.array(list(self.adata.var_names))

        markers={}
        # s: total expression of each gene over all cells
        s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
        lblsu=np.unique(lbls)
        for i in lblsu:
            # d: total expression of each gene within cluster i
            d = np.array(self.adata.layers['X_disp']
                         [lbls == i, :].sum(0)).flatten()
            rat = np.zeros(d.size)
            # score = (in-cluster sum)^2 / total sum, scaled by SAM weight;
            # genes never expressed keep a score of 0
            rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
                self.adata.var['weights'].values[s > 0]
            x = np.argsort(-rat)
            markers[i] = all_gene_names[x[:]]

        self.adata.uns['marker_genes_ratio'] = markers

        return markers
    def identify_marker_genes_corr(self, labels=None, n_genes=4000):
        """
        Ranking marker genes based on their respective magnitudes in the
        correlation dot products with cluster-specific reference expression
        profiles.

        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.

        n_genes - int, optional, default 4000
            By default, computes correlations on the top 4000 SAM-weighted genes.

        Returns
        -------
        markers - dict
            Maps each cluster ID to gene names ranked by correlation with
            the cluster's mean expression profile. Also stored in
            `adata.uns['marker_genes_corr']`.
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels

        w=self.adata.var['weights'].values
        s = StandardScaler()
        # restrict to the top n_genes by SAM weight; standardize each gene
        # across cells and re-apply the SAM weights
        idxg = np.argsort(-w)[:n_genes]
        y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]

        all_gene_names = np.array(list(self.adata.var_names))[idxg]
        markers = {}
        lblsu=np.unique(lbls)
        for i in lblsu:
            Gcells = np.array(list(self.adata.obs_names[lbls==i]))
            z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
            # z-score each cell's profile, build a z-scored cluster mean
            # reference, and correlate every cell against it
            m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
            ref = z1.mean(0)
            ref = (ref-ref.mean())/ref.std()
            g2 = (m1*ref).mean(0)
            markers[i] = all_gene_names[np.argsort(-g2)]

        self.adata.uns['marker_genes_corr'] = markers
        return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.run_tsne | python | def run_tsne(self, X=None, metric='correlation', **kwargs):
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d | Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' is set to 'precomputed' by default,
implying that this function expects a distance matrix by default. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1011-L1026 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
    def __init__(self, counts=None, annotations=None):
        # Accept three preloaded-data forms: (matrix, gene IDs, cell IDs)
        # tuple/list, a cells-x-genes DataFrame, or a ready AnnData object.
        if isinstance(counts, tuple) or isinstance(counts, list):
            raw_data, all_gene_names, all_cell_names = counts
            if isinstance(raw_data, np.ndarray):
                # keep the expression matrix sparse internally
                raw_data = sp.csr_matrix(raw_data)
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        elif isinstance(counts, pd.DataFrame):
            raw_data = sp.csr_matrix(counts.values)
            all_gene_names = np.array(list(counts.columns.values))
            all_cell_names = np.array(list(counts.index.values))
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        elif isinstance(counts, AnnData):
            all_cell_names=np.array(list(counts.obs_names))
            all_gene_names=np.array(list(counts.var_names))
            self.adata_raw = counts
        elif counts is not None:
            raise Exception(
                "\'counts\' must be either a tuple/list of "
                "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
                "cells x genes")

        if(annotations is not None):
            annotations = np.array(list(annotations))
            if counts is not None:
                self.adata_raw.obs['annotations'] = pd.Categorical(annotations)

        if(counts is not None):
            # deduplicate IDs only when duplicates actually exist
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()
            # working copy; 'X_disp' is the layer used for display/averaging
            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = self.adata.X
    def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                        include_genes=None, exclude_genes=None,
                        include_cells=None, exclude_cells=None,
                        norm='log', min_expression=1, thresh=0.01,
                        filter_genes=True):
        """Log-normalizes and filters the expression data.

        Parameters
        ----------

        div : float, optional, default 1
            The factor by which the gene expression will be divided prior to
            log normalization.

        downsample : float, optional, default 0
            The factor by which to randomly downsample the data. If 0, the
            data will not be downsampled.

        sum_norm : str or float, optional, default None
            If a float, the total number of transcripts in each cell will be
            normalized to this value prior to normalization and filtering.
            Otherwise, nothing happens. If 'cell_median', each cell is
            normalized to have the median total read count per cell. If
            'gene_median', each gene is normalized to have the median total
            read count per gene.

        norm : str, optional, default 'log'
            If 'log', log-normalizes the expression data. If 'ftt', applies the
            Freeman-Tukey variance-stabilization transformation. If
            'multinomial', applies the Pearson-residual transformation (this is
            experimental and should only be used for raw, un-normalized UMI
            datasets). If None, the data is not normalized.

        include_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to keep.
            All other genes will be filtered out. Gene names are case-
            sensitive.

        exclude_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to
            exclude. These genes will be filtered out. Gene names are case-
            sensitive.

        include_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to keep.
            All other cells will be filtered out. Cell names are
            case-sensitive.

        exclude_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to exclude.
            Thses cells will be filtered out. Cell names are
            case-sensitive.

        min_expression : float, optional, default 1
            The threshold above which a gene is considered
            expressed. Gene expression values less than 'min_expression' are
            set to zero.

        thresh : float, optional, default 0.01
            Keep genes expressed in greater than 'thresh'*100 % of cells and
            less than (1-'thresh')*100 % of cells, where a gene is considered
            expressed if its expression value exceeds 'min_expression'.

        filter_genes : bool, optional, default True
            Setting this to False turns off filtering operations aside from
            removing genes with zero expression across all cells. Genes passed
            in exclude_genes or not passed in include_genes will still be
            filtered.
        """
        # load data
        try:
            D= self.adata_raw.X
            self.adata = self.adata_raw.copy()
        except AttributeError:
            print('No data loaded')
        # filter cells
        cell_names = np.array(list(self.adata_raw.obs_names))
        idx_cells = np.arange(D.shape[0])
        if(include_cells is not None):
            include_cells = np.array(list(include_cells))
            idx2 = np.where(np.in1d(cell_names, include_cells))[0]
            idx_cells = np.array(list(set(idx2) & set(idx_cells)))
        if(exclude_cells is not None):
            exclude_cells = np.array(list(exclude_cells))
            idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                    invert=True))[0]
            idx_cells = np.array(list(set(idx4) & set(idx_cells)))
        if downsample > 0:
            # random subsample of cells by the given factor
            numcells = int(D.shape[0] / downsample)
            rand_ind = np.random.choice(np.arange(D.shape[0]),
                                        size=numcells, replace=False)
            idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
        else:
            numcells = D.shape[0]
        mask_cells = np.zeros(D.shape[0], dtype='bool')
        mask_cells[idx_cells] = True
        self.adata = self.adata_raw[mask_cells,:].copy()
        D = self.adata.X
        # work on a float32 CSR matrix throughout
        if isinstance(D,np.ndarray):
            D=sp.csr_matrix(D,dtype='float32')
        else:
            D=D.astype('float32')
        D.sort_indices()
        if(D.getformat() == 'csc'):
            D=D.tocsr();
        # sum-normalize (skipped for 'multinomial', which normalizes below)
        if (sum_norm == 'cell_median' and norm != 'multinomial'):
            s = D.sum(1).A.flatten()
            sum_norm = np.median(s)
            D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
        elif (sum_norm == 'gene_median' and norm != 'multinomial'):
            s = D.sum(0).A.flatten()
            sum_norm = np.median(s)
            # avoid division by zero for all-zero genes
            s[s==0]=1
            D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
        elif sum_norm is not None and norm != 'multinomial':
            D = D.multiply(1 / D.sum(1).A.flatten()[:,
                           None] * sum_norm).tocsr()
        # normalize
        self.adata.X = D
        if norm is None:
            D.data[:] = (D.data / div)
        elif(norm.lower() == 'log'):
            D.data[:] = np.log2(D.data / div + 1)
        elif(norm.lower() == 'ftt'):
            # Freeman-Tukey: sqrt(x) + sqrt(x+1)
            D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
        elif norm.lower() == 'multinomial':
            # Pearson residuals: (x - mu) / sqrt(mu - mu^2/n) with
            # mu_ij = n_i * p_j (cell totals times gene proportions)
            ni = D.sum(1).A.flatten() #cells
            pj = (D.sum(0) / D.sum()).A.flatten() #genes
            col = D.indices
            row=[]
            for i in range(D.shape[0]):
                row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
            row = np.concatenate(row).astype('int32')
            mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
            mu2 = mu.copy()
            mu2.data[:]=mu2.data**2
            mu2 = mu2.multiply(1/ni[:,None])
            mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)

            self.adata.X = mu
            # D becomes the log-normalized companion layer for display
            if sum_norm is None:
                sum_norm = np.median(ni)
            D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
            D.data[:] = np.log2(D.data / div + 1)
        else:
            D.data[:] = (D.data / div)
        # zero-out low-expressed genes
        idx = np.where(D.data <= min_expression)[0]
        D.data[idx] = 0
        # filter genes
        gene_names = np.array(list(self.adata.var_names))
        idx_genes = np.arange(D.shape[1])
        if(include_genes is not None):
            include_genes = np.array(list(include_genes))
            idx = np.where(np.in1d(gene_names, include_genes))[0]
            idx_genes = np.array(list(set(idx) & set(idx_genes)))
        if(exclude_genes is not None):
            exclude_genes = np.array(list(exclude_genes))
            idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                    invert=True))[0]
            idx_genes = np.array(list(set(idx3) & set(idx_genes)))
        if(filter_genes):
            # keep genes expressed in (thresh, 1-thresh] fraction of cells
            a, ct = np.unique(D.indices, return_counts=True)
            c = np.zeros(D.shape[1])
            c[a] = ct
            keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                           c / D.shape[0] <= 1 - thresh))[0]
            idx_genes = np.array(list(set(keep) & set(idx_genes)))
        mask_genes = np.zeros(D.shape[1], dtype='bool')
        mask_genes[idx_genes] = True
        # zero out filtered genes rather than dropping columns, so indices
        # stay aligned with adata_raw
        self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
        self.adata.X.eliminate_zeros()
        self.adata.var['mask_genes']=mask_genes
        if norm == 'multinomial':
            self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
            self.adata.layers['X_disp'].eliminate_zeros()
        else:
            self.adata.layers['X_disp'] = self.adata.X
    def load_data(self, filename, transpose=True,
                  save_sparse_file='h5ad', sep=',', **kwargs):
        """Loads the specified data file. The file can be a table of
        read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
        as columns by default. The file can also be a pickle file (output from
        'save_sparse_data') or an h5ad file (output from 'save_anndata').

        This function that loads the file specified by 'filename'.

        Parameters
        ----------
        filename - string
            The path to the tabular raw expression counts file.

        sep - string, optional, default ','
            The delimeter used to read the input data table. By default
            assumes the input table is delimited by commas.

        save_sparse_file - str, optional, default 'h5ad'
            If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
            (the native AnnData file format) to the same folder as the original
            data for faster loading in the future. If 'p', pickles the sparse
            data structure, cell names, and gene names in the same folder as
            the original data for faster loading in the future.

        transpose - bool, optional, default True
            By default, assumes file is (genes x cells). Set this to False if
            the file has dimensions (cells x genes).
        """
        if filename.split('.')[-1] == 'p':
            # pickle produced by save_sparse_data: (data, cells, genes)
            raw_data, all_cell_names, all_gene_names = (
                pickle.load(open(filename, 'rb')))

            if(transpose):
                raw_data = raw_data.T
            if raw_data.getformat()=='csc':
                print("Converting sparse matrix to csr format...")
                raw_data=raw_data.tocsr()

            # already a fast-loading format; no need to re-save
            save_sparse_file = None
        elif filename.split('.')[-1] != 'h5ad':
            # plain text table of counts
            df = pd.read_csv(filename, sep=sep, index_col=0)
            if(transpose):
                dataset = df.T
            else:
                dataset = df

            raw_data = sp.csr_matrix(dataset.values)
            all_cell_names = np.array(list(dataset.index.values))
            all_gene_names = np.array(list(dataset.columns.values))

        if filename.split('.')[-1] != 'h5ad':
            self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                     var={'var_names': all_gene_names})
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()
            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = raw_data
        else:
            # native AnnData file
            self.adata_raw = anndata.read_h5ad(filename, **kwargs)
            self.adata = self.adata_raw.copy()
            if 'X_disp' not in list(self.adata.layers.keys()):
                self.adata.layers['X_disp'] = self.adata.X
            save_sparse_file = None

        # optionally cache a fast-loading copy next to the original file
        if(save_sparse_file == 'p'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_sparse_data(path + new_sparse_file + '_sparse.p')
        elif(save_sparse_file == 'h5ad'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
    def load_annotations(self, aname, sep=','):
        """Loads cell annotations.

        Loads the cell annoations specified by the 'aname' path.

        Parameters
        ----------
        aname - string
            The path to the annotations file. First column should be cell IDs
            and second column should be the desired annotations.

        sep - string, optional, default ','
            The delimiter used to read the annotations file.
        """
        ann = pd.read_csv(aname)

        cell_names = np.array(list(self.adata.obs_names))
        all_cell_names = np.array(list(self.adata_raw.obs_names))

        # re-read with the right index/header configuration depending on
        # the file's shape (with/without header, with/without index column)
        if(ann.shape[1] > 1):
            ann = pd.read_csv(aname, index_col=0, sep=sep)
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
        else:
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, header=None, sep=sep)
        # normalize index dtype to fixed-width unicode for reliable lookup
        ann.index = np.array(list(ann.index.astype('<U100')))
        # ann1: annotations restricted/ordered to the filtered cells;
        # ann2: annotations for all raw cells
        ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
        ann2 = np.array(list(ann.values.flatten()))
        self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
        self.adata.obs['annotations'] = pd.Categorical(ann1)
    def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
        """Computes the spatial dispersion factors for each gene.

        Parameters
        ----------
        nnm - scipy.sparse, float
            Square cell-to-cell nearest-neighbor matrix.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This ensures
            that outlier genes do not significantly skew the weight
            distribution.

        Returns:
        -------
        weights - ndarray, float
            The vector of gene weights. (Note: an earlier docstring also
            claimed sorted indices were returned; only the weights are.)
        """
        self.knn_avg(nnm)
        D_avg = self.adata.layers['X_knn_avg']

        mu, var = sf.mean_variance_axis(D_avg, axis=0)

        # Fano-factor-style dispersion of the kNN-averaged expression
        dispersions = np.zeros(var.size)
        dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]

        self.adata.var['spatial_dispersions'] = dispersions.copy()

        # clip at the mean of the top num_norm_avg dispersions so outlier
        # genes do not dominate the weights
        ma = np.sort(dispersions)[-num_norm_avg:].mean()
        dispersions[dispersions >= ma] = ma

        weights = ((dispersions / dispersions.max())**0.5).flatten()

        self.adata.var['weights'] = weights
        return weights
    def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
        """Computes the contribution of the gene IDs in 'genes' to each
        principal component (PC) of the filtered expression data as the mean of
        the absolute value of the corresponding gene loadings. High values
        correspond to PCs that are highly correlated with the features in
        'genes'. These PCs can then be regressed out of the data using
        'regress_genes'.

        Parameters
        ----------
        genes - numpy.array or list
            Genes for which contribution to each PC will be calculated.

        npcs - int, optional, default None
            How many PCs to calculate when computing PCA of the filtered and
            log-transformed expression data. If None, calculate all PCs.

        plot - bool, optional, default False
            If True, plot the scores reflecting how correlated each PC is with
            genes of interest. Otherwise, do not plot anything.

        Returns:
        -------
        x - numpy.array
            Scores reflecting how correlated each PC is with the genes of
            interest (ordered by decreasing eigenvalues). Returns None if
            'genes' is not provided.
        """
        from sklearn.decomposition import PCA
        if npcs is None:
            npcs = self.adata.X.shape[0]

        pca = PCA(n_components=npcs)
        pc = pca.fit_transform(self.adata.X.toarray())

        # stash the fitted model and scores for regress_genes
        self.regression_pca = pca
        self.regression_pcs = pc

        gene_names = np.array(list(self.adata.var_names))
        if(genes is not None):
            idx = np.where(np.in1d(gene_names, genes))[0]
            sx = pca.components_[:, idx]
            # per-PC score: mean absolute loading over the selected genes
            x = np.abs(sx).mean(1)

            if plot:
                plt.figure()
                plt.plot(x)

            return x
        else:
            return
def regress_genes(self, PCs):
    """Regress out the principal components in 'PCs' from the filtered
    expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
    been previously called.

    Parameters
    ----------
    PCs - int, numpy.array, list
        The principal components to regress out of the expression data.
    """
    # Normalize the input to a flat index array (accepts a scalar,
    # list, or array).
    ind = [PCs]
    ind = np.array(ind).flatten()
    try:
        # Subtract the selected components, scaling loadings by the SAM
        # gene weights.
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :] * self.adata.var[
                'weights'].values)
    except BaseException:
        # NOTE(review): broad except — presumably a fallback for when
        # 'weights' is missing or shapes mismatch; confirm the intended
        # failure modes before narrowing this.
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :])
    self.adata.X = sp.csr_matrix(y)
def run(self,
        max_iter=10,
        verbose=True,
        projection='umap',
        stopping_condition=5e-3,
        num_norm_avg=50,
        k=20,
        distance='correlation',
        preprocessing='Normalizer',
        proj_kwargs={}):
    """Runs the Self-Assembling Manifold algorithm.

    Parameters
    ----------
    k - int, optional, default 20
        The number of nearest neighbors to identify for each cell.
    distance : string, optional, default 'correlation'
        The distance metric to use when constructing cell distance
        matrices. Can be any of the distance metrics supported by
        sklearn's 'pdist'.
    max_iter - int, optional, default 10
        The maximum number of iterations SAM will run.
    stopping_condition - float, optional, default 5e-3
        The stopping condition threshold for the RMSE between gene weights
        in adjacent iterations.
    verbose - bool, optional, default True
        If True, the iteration number and error between gene weights in
        adjacent iterations will be displayed.
    projection - str, optional, default 'umap'
        If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
        embedding. Otherwise, no embedding will be generated.
    preprocessing - str, optional, default 'Normalizer'
        If 'Normalizer', use sklearn.preprocessing.Normalizer, which
        normalizes expression data prior to PCA such that each cell has
        unit L2 norm. If 'StandardScaler', use
        sklearn.preprocessing.StandardScaler, which normalizes expression
        data prior to PCA such that each gene has zero mean and unit
        variance. Otherwise, do not normalize the expression data. We
        recommend using 'StandardScaler' for large datasets and
        'Normalizer' otherwise.
    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This prevents
        genes with large spatial dispersions from skewing the distribution
        of weights.
    proj_kwargs - dict, optional, default {}
        A dictionary of keyword arguments to pass to the projection
        functions.

    Note: 'proj_kwargs' uses a mutable default; it is only read here,
    never mutated, so the shared default is harmless in practice.
    """
    self.distance = distance
    D = self.adata.X
    # Clamp k to [5, 100] and to at most (number of cells - 2).
    self.k = k
    if(self.k < 5):
        self.k = 5
    elif(self.k > 100):
        self.k = 100
    if(self.k > D.shape[0] - 1):
        self.k = D.shape[0] - 2
    numcells = D.shape[0]
    # Heuristic: keep fewer genes for larger datasets.
    n_genes = 8000
    if numcells > 3000 and n_genes > 3000:
        n_genes = 3000
    elif numcells > 2000 and n_genes > 4500:
        n_genes = 4500
    elif numcells > 1000 and n_genes > 6000:
        n_genes = 6000
    elif n_genes > 8000:
        n_genes = 8000
    # Heuristic: use fewer PCs for larger datasets.
    npcs = None
    if npcs is None and numcells > 3000:
        npcs = 150
    elif npcs is None and numcells > 2000:
        npcs = 250
    elif npcs is None and numcells > 1000:
        npcs = 350
    elif npcs is None:
        npcs = 500
    tinit = time.time()
    # Initialize with a random kNN graph: each cell linked to itself
    # plus (k - 1) randomly chosen cells.
    edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
    nums = np.arange(edm.shape[1])
    RINDS = np.random.randint(
        0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                       (self.k - 1)))
    RINDS = np.hstack((nums[:, None], RINDS))
    edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
    edm = edm.tocsr()
    print('RUNNING SAM')
    # Initial gene weights computed from the random graph.
    W = self.dispersion_ranking_NN(
        edm, num_norm_avg=1)
    old = np.zeros(W.size)
    new = W
    i = 0
    # Convergence measure: RMSE between successive weight vectors.
    err = ((new - old)**2).mean()**0.5
    if max_iter < 5:
        max_iter = 5
    nnas = num_norm_avg
    # Keep the graph and weight history of each iteration.
    self.Ns = [edm]
    self.Ws = [W]
    while (i < max_iter and err > stopping_condition):
        conv = err
        if(verbose):
            print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))
        i += 1
        old = new
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        new = W
        err = ((new - old)**2).mean()**0.5
        self.Ns.append(EDM)
        self.Ws.append(W)
    # One final pass with the converged weights.
    W, wPCA_data, EDM, = self.calculate_nnm(
        D, W, n_genes, preprocessing, npcs, numcells, nnas)
    self.Ns.append(EDM)
    all_gene_names = np.array(list(self.adata.var_names))
    indices = np.argsort(-W)
    ranked_genes = all_gene_names[indices]
    # Bin correlated genes along the final manifold (stored in uns).
    self.corr_bin_genes(number_of_features=1000)
    # Store SAM outputs on the AnnData object.
    self.adata.uns['ranked_genes'] = ranked_genes
    self.adata.obsm['X_pca'] = wPCA_data
    self.adata.uns['neighbors'] = {}
    self.adata.uns['neighbors']['connectivities'] = EDM
    if(projection == 'tsne'):
        print('Computing the t-SNE embedding...')
        self.run_tsne(**proj_kwargs)
    elif(projection == 'umap'):
        print('Computing the UMAP embedding...')
        self.run_umap(**proj_kwargs)
    elif(projection == 'diff_umap'):
        print('Computing the diffusion UMAP embedding...')
        self.run_diff_umap(**proj_kwargs)
    elapsed = time.time() - tinit
    if verbose:
        print('Elapsed time: ' + str(elapsed) + ' seconds')
def calculate_nnm(
        self,
        D,
        W,
        n_genes,
        preprocessing,
        npcs,
        numcells,
        num_norm_avg):
    """One SAM iteration: rescale the expression data by the current
    gene weights, run weighted PCA, build the kNN graph, and recompute
    the gene weights from that graph.

    Returns (new weights, weighted PCA coordinates, kNN adjacency).
    """
    if(n_genes is None):
        gkeep = np.arange(W.size)
    else:
        # Keep only the top 'n_genes' weighted genes (sorted indices
        # so column order matches the original matrix).
        gkeep = np.sort(np.argsort(-W)[:n_genes])
    if preprocessing == 'Normalizer':
        Ds = D[:, gkeep].toarray()
        # Each cell scaled to unit L2 norm.
        Ds = Normalizer().fit_transform(Ds)
    elif preprocessing == 'StandardScaler':
        Ds = D[:, gkeep].toarray()
        # Each gene standardized, then clipped to +/-5 to limit the
        # influence of outliers.
        Ds = StandardScaler(with_mean=True).fit_transform(Ds)
        Ds[Ds > 5] = 5
        Ds[Ds < -5] = -5
    else:
        Ds = D[:, gkeep].toarray()
    # Rescale genes by their SAM weights.
    D_sub = Ds * (W[gkeep])
    # Exact SVD for small datasets; let sklearn choose otherwise.
    if numcells > 500:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='auto')
    else:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='full')
    if self.distance == 'euclidean':
        # Unit-normalize rows so euclidean distance behaves like a
        # cosine-style metric.
        g_weighted = Normalizer().fit_transform(g_weighted)
    self.adata.uns['pca_obj'] = pca
    EDM = self.calc_nnm(g_weighted)
    W = self.dispersion_ranking_NN(
        EDM, num_norm_avg=num_norm_avg)
    self.adata.uns['X_processed'] = D_sub
    return W, g_weighted, EDM
def calc_nnm(self, g_weighted):
    """Build the binary k-nearest-neighbor adjacency matrix from the
    weighted PCA coordinates, choosing the strategy by dataset size."""
    n = g_weighted.shape[0]
    if n <= 8000:
        # Small data: dense pairwise distances, thresholded to kNN.
        d = ut.compute_distances(g_weighted, self.distance)
        return sp.csr_matrix(ut.dist_to_nn(d, self.k))
    # Large data: approximate nearest neighbors, scattered into a
    # sparse binary adjacency matrix.
    nbrs, _ = ut.nearest_neighbors(
        g_weighted, n_neighbors=self.k, metric=self.distance)
    adj = sp.coo_matrix((n, n), dtype='i').tolil()
    rows = np.tile(np.arange(nbrs.shape[0])[:, None],
                   (1, nbrs.shape[1])).flatten()
    adj[rows, nbrs.flatten()] = 1
    return adj.tocsr()
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
    """Plots orthogonal expression patterns.

    With no 'group', shows the top-ranked gene of every distinct
    correlated gene group. With a 'group' index, shows the top
    'n_genes' genes within that group.

    Parameters
    ----------
    group - int, optional, default None
        Index of the correlated group to display; None shows one
        representative gene per group.
    n_genes - int, optional, default 5
        Number of top ranked genes shown when 'group' is given.
    **kwargs -
        Forwarded to 'show_gene_expression' / 'scatter'.
    """
    geneID_groups = self.adata.uns['gene_groups']
    if group is None:
        # One representative (top-ranked) gene per group.
        for members in geneID_groups:
            self.show_gene_expression(members[0], **kwargs)
    else:
        # Top 'n_genes' genes of the requested group.
        for rank in range(n_genes):
            self.show_gene_expression(geneID_groups[group][rank], **kwargs)
def plot_correlated_genes(
        self,
        name,
        n_genes=5,
        number_of_features=1000,
        **kwargs):
    """Plots gene expression patterns correlated with the input gene.

    Parameters
    ----------
    name - string
        The (case-sensitive) gene whose correlated expression patterns
        will be displayed.
    n_genes - int, optional, default 5
        The number of top ranked correlated genes to display.
    number_of_features - int, optional, default 1000
        Passed through to 'corr_bin_genes'.
    **kwargs -
        Forwarded to 'show_gene_expression' / 'scatter'.
    """
    gene_names = np.array(list(self.adata.var_names))
    if not (gene_names == name).any():
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return
    sds = self.corr_bin_genes(
        input_gene=name,
        number_of_features=number_of_features)
    # sds[0] is the query gene itself; plot the next correlated genes.
    stop = min(n_genes + 1, sds.size)
    for rank in range(1, stop):
        self.show_gene_expression(sds[rank], **kwargs)
    return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
    """A (hacky) method for binning groups of genes correlated along the
    SAM manifold.

    Parameters
    ----------
    number_of_features - int, optional, default None
        The number of genes to bin. Capped at 1000 due to memory
        considerations.
    input_gene - str, optional, default None
        If not None, use this gene as the first seed when growing the
        correlation bins; only the group containing it is returned.
    """
    weights = self.adata.var['spatial_dispersions'].values
    all_gene_names = np.array(list(self.adata.var_names))
    D_avg = self.adata.layers['X_knn_avg']
    # Candidate genes: indices of nonzero-dispersion genes, highest
    # dispersion first.
    idx2 = np.argsort(-weights)[:weights[weights > 0].size]
    if(number_of_features is None or number_of_features > idx2.size):
        number_of_features = idx2.size
    if number_of_features > 1000:
        number_of_features = 1000
    if(input_gene is not None):
        input_gene = np.where(all_gene_names == input_gene)[0]
        if(input_gene.size == 0):
            # NOTE(review): "note" is a typo for "not" in this message.
            print(
                "Gene note found in the filtered dataset. Note "
                "that genes are case sensitive.")
            return
        seeds = [np.array([input_gene])]
        # Pairwise correlations of the KNN-averaged expression of the
        # top 'number_of_features' genes.
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            # Baseline: mean positive correlation of gene i.
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            # Assign gene i to the seed group whose representative it
            # correlates with most strongly (above the baseline).
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                   > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                # No group matched; gene i starts a new group.
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(all_gene_names[seeds[i]])
        # Only the group seeded by the query gene is returned.
        return geneID_groups[0]
    else:
        # Same greedy binning, seeded by the highest-dispersion gene;
        # all groups are stored on the AnnData and returned.
        seeds = [np.array([idx2[0]])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                   > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(
                all_gene_names[seeds[i]])
        self.adata.uns['gene_groups'] = geneID_groups
        return geneID_groups
def run_umap(self, X=None, metric=None, **kwargs):
    """Wrapper for umap-learn.

    See https://github.com/lmcinnes/umap sklearn for the documentation
    and source code.
    """
    import umap as umap
    if metric is None:
        metric = self.distance
    # Both branches use the same estimator configuration.
    reducer = umap.UMAP(metric=metric, **kwargs)
    if X is not None:
        # Embed the supplied matrix and return the coordinates.
        return reducer.fit_transform(X)
    # Default: embed SAM's PCA coordinates and store on the AnnData.
    self.adata.obsm['X_umap'] = reducer.fit_transform(
        self.adata.obsm['X_pca'])
def run_diff_umap(self, use_rep='X_pca', metric='euclidean', n_comps=15,
                  method='gauss', **kwargs):
    """
    Experimental -- running UMAP on the diffusion components

    NOTE(review): the 'metric' parameter is never used below — the
    first neighbor graph uses self.distance and the second hardcodes
    'euclidean'; confirm whether it should be wired through.
    'scanpy.api' is scanpy's legacy import path.
    """
    import scanpy.api as sc
    # Neighbors on the chosen representation, then diffusion maps.
    sc.pp.neighbors(self.adata, use_rep=use_rep, n_neighbors=self.k,
                    metric=self.distance, method=method)
    sc.tl.diffmap(self.adata, n_comps=n_comps)
    # Rebuild the neighbor graph in diffusion space before UMAP.
    sc.pp.neighbors(self.adata, use_rep='X_diffmap', n_neighbors=self.k,
                    metric='euclidean', method=method)
    # Preserve any existing SAM UMAP embedding before scanpy
    # overwrites 'X_umap'.
    if 'X_umap' in self.adata.obsm.keys():
        self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
    sc.tl.umap(self.adata, min_dist=0.1, copy=False)
def knn_avg(self, nnm=None):
    """Average expression over each cell's k nearest neighbors.

    Stores the smoothed matrix in adata.layers['X_knn_avg']. When
    'nnm' is None, the stored neighbor connectivity matrix is used.
    """
    if nnm is None:
        nnm = self.adata.uns['neighbors']['connectivities']
    # Dividing by k turns the row-sum over neighbors into an average.
    self.adata.layers['X_knn_avg'] = (nnm / self.k).dot(
        self.adata.layers['X_disp'])
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
            edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
    """Display a scatter plot.

    Displays a scatter plot using the SAM projection or another input
    projection with or without annotations.

    Parameters
    ----------
    projection - ndarray of floats, optional, default None
        An N x 2 matrix, where N is the number of data points. If None,
        use an existing SAM projection (default t-SNE). Can take on values
        'umap' or 'tsne' to specify either the SAM UMAP embedding or
        SAM t-SNE embedding.
    c - ndarray or str, optional, default None
        Colors for each cell in the scatter plot. Can be a vector of
        floats or strings for cell annotations. Can also be a key
        for sam.adata.obs (i.e. 'louvain_clusters').
    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.
    cmap - string, optional, default 'rainbow'
        The colormap to use for the input color values.
    colorbar - bool, optional default True
        If True, display a colorbar indicating which values / annotations
        correspond to which color in the scatter plot.

    Keyword arguments -
        All other keyword arguments that can be passed into
        matplotlib.pyplot.scatter can be used.
    """
    if (not PLOTTING):
        print("matplotlib not installed!")
    else:
        if(isinstance(projection, str)):
            try:
                # A string is treated as a key into adata.obsm
                # (e.g. 'X_umap', 'X_tsne').
                dt = self.adata.obsm[projection]
            except KeyError:
                # NOTE(review): this branch prints but falls through,
                # so 'dt' is unbound and a NameError follows — the
                # message should probably be paired with a 'return'.
                print('Please create a projection first using run_umap or'
                      'run_tsne')
        elif(projection is None):
            # Prefer UMAP, fall back to t-SNE.
            try:
                dt = self.adata.obsm['X_umap']
            except KeyError:
                try:
                    dt = self.adata.obsm['X_tsne']
                except KeyError:
                    print("Please create either a t-SNE or UMAP projection"
                          "first.")
                    return
        else:
            dt = projection
        if(axes is None):
            plt.figure()
            axes = plt.gca()
        if(c is None):
            plt.scatter(dt[:, 0], dt[:, 1], s=s,
                        linewidth=linewidth, edgecolor=edgecolor, **kwargs)
        else:
            if isinstance(c, str):
                try:
                    # A string color is treated as an obs annotation key.
                    c = self.adata.obs[c].get_values()
                except KeyError:
                    0  # do nothing: keep 'c' as the literal value
            if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
               (isinstance(c, np.ndarray) or isinstance(c, list))):
                # Categorical string labels: convert to integer codes
                # and label the colorbar with the original strings.
                i = ut.convert_annotations(c)
                ui, ai = np.unique(i, return_index=True)
                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                    cbar.ax.set_yticklabels(c[ai])
            else:
                # Numeric colors (or a single scalar, which disables
                # the colorbar).
                if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                    colorbar = False
                i = c
                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
    """Display a gene's expressions.

    Displays a scatter plot using the SAM projection or another input
    projection with a particular gene's expressions overlaid.

    Parameters
    ----------
    gene - string
        a case-sensitive string indicating the gene expression pattern
        to display.
    avg - bool, optional, default True
        If True, the plots use the k-nearest-neighbor-averaged expression
        values to smooth out noisy expression patterns and improves
        visualization.
    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.
    **kwargs - all keyword arguments in 'SAM.scatter' are eligible.
    """
    all_gene_names = np.array(list(self.adata.var_names))
    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))
    idx = np.where(all_gene_names == gene)[0]
    name = gene
    if(idx.size == 0):
        # BUG FIX: message typo "Gene note found" -> "Gene not found"
        # (now consistent with plot_correlated_genes).
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return
    if(avg):
        # Use the KNN-smoothed expression; fall back to raw log2
        # counts if the smoothed values are all zero.
        a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
        if a.sum() == 0:
            a = np.log2(self.adata_raw.X[np.in1d(
                all_cell_names, cell_names), :][:,
                idx].toarray().flatten() + 1)
    else:
        # Raw log2(counts + 1), restricted to the filtered cells.
        a = np.log2(self.adata_raw.X[np.in1d(
            all_cell_names, cell_names), :][:,
            idx].toarray().flatten() + 1)
    if axes is None:
        plt.figure()
        axes = plt.gca()
    self.scatter(c=a, axes=axes, **kwargs)
    axes.set_title(name)
def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
    """Cluster cells with DBSCAN, then assign noise points (label -1)
    to their nearest cluster by a kNN majority vote.

    If X is None, the stored UMAP embedding is clustered and the labels
    are saved to adata.obs['density_clusters']; otherwise the label
    vector is returned.
    """
    from sklearn.cluster import DBSCAN
    if X is None:
        X = self.adata.obsm['X_umap']
        save = True
    else:
        save = False
    cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)
    idx0 = np.where(cl != -1)[0]  # clustered cells
    idx1 = np.where(cl == -1)[0]  # noise cells
    if idx1.size > 0 and idx0.size > 0:
        # Distances from noise cells to clustered cells.
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        # Binary membership matrix of each noise cell's k nearest
        # clustered cells.
        knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # Vote: count neighbors in each cluster, take the argmax.
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
        cl[idx1] = np.argmax(nnmc, axis=1)
    if save:
        self.adata.obs['density_clusters'] = pd.Categorical(cl)
    else:
        return cl
def louvain_clustering(self, X=None, res=1, method='modularity'):
    """Runs Louvain clustering using the vtraag implementation. Assumes
    that 'louvain' optional dependency is installed.

    Parameters
    ----------
    X - sparse matrix, optional, default None
        Graph to cluster. If None, the stored SAM neighbor graph is
        used and labels are saved to adata.obs['louvain_clusters'];
        otherwise the label vector is returned.
    res - float, optional, default 1
        The resolution parameter which tunes the number of clusters Louvain
        finds.
    method - str, optional, default 'modularity'
        Can be 'modularity' or 'significance', which are two different
        optimizing functions in the Louvain algorithm.
    """
    if X is None:
        X = self.adata.uns['neighbors']['connectivities']
        save = True
    else:
        if not sp.isspmatrix_csr(X):
            X = sp.csr_matrix(X)
        save = False
    import igraph as ig
    import louvain
    # Sharpen the graph: X.X^T scores shared neighbors; keep only the
    # top-k edges per node.
    adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
    sources, targets = adjacency.nonzero()
    weights = adjacency[sources, targets]
    if isinstance(weights, np.matrix):
        weights = weights.A1
    g = ig.Graph(directed=True)
    g.add_vertices(adjacency.shape[0])
    g.add_edges(list(zip(sources, targets)))
    try:
        g.es['weight'] = weights
    except BaseException:
        # Best-effort: fall back to an unweighted graph.
        pass
    if method == 'significance':
        cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
    else:
        cl = louvain.find_partition(
            g,
            louvain.RBConfigurationVertexPartition,
            resolution_parameter=res)
    if save:
        self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
    else:
        return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
    """Performs k-means clustering.

    Parameters
    ----------
    numc - int
        Number of clusters
    X - numpy.ndarray, optional, default None
        Data to cluster. If None, the SAM-processed expression
        projected onto the top 'npcs' PCs is used and labels are saved
        to adata.obs['kmeans_clusters']; otherwise the label vector is
        returned.
    npcs - int, optional, default 15
        Number of principal components to use as input for k-means
        clustering.
    """
    from sklearn.cluster import KMeans
    if X is None:
        D_sub = self.adata.uns['X_processed']
        # Center and project onto the stored PCA loadings.
        X = (
            D_sub -
            D_sub.mean(0)).dot(
            self.adata.uns['pca_obj'].components_[
                :npcs,
                :].T)
        save = True
    else:
        save = False
    # Cluster on unit-normalized rows.
    cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
    if save:
        self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
    else:
        return cl
def leiden_clustering(self, X=None, res=1):
    """Runs Leiden clustering via scanpy.

    If X is None, clusters the neighbor graph stored on the AnnData and
    saves labels to adata.obs['leiden_clusters']; otherwise clusters
    the supplied adjacency matrix and saves labels to
    adata.obs['leiden_clusters_X'] (nothing is returned either way).
    """
    import scanpy.api as sc
    if X is None:
        sc.tl.leiden(self.adata, resolution=res,
                     key_added='leiden_clusters')
        # Re-store as integer categories (scanpy stores strings).
        self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
            'leiden_clusters'].get_values().astype('int'))
    else:
        sc.tl.leiden(self.adata, resolution=res, adjacency=X,
                     key_added='leiden_clusters_X')
        self.adata.obs['leiden_clusters_X'] = pd.Categorical(self.adata.obs[
            'leiden_clusters_X'].get_values().astype('int'))
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    """Cluster cells with HDBSCAN, then assign noise points (label -1)
    to their nearest cluster by a kNN majority vote.

    If X is None, the processed expression projected onto the top 15
    PCs is clustered and labels saved to adata.obs['hdbknn_clusters'];
    otherwise the label vector is returned.
    """
    import hdbscan
    if X is None:
        # X = self.adata.obsm['X_pca']
        D = self.adata.uns['X_processed']
        # Center and project onto the top 15 stored PCA loadings.
        X = (D - D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:, :15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False
    if k is None:
        # Fixed vote size (deliberately not self.k, per the original).
        k = 20  # self.k
    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
    cl = hdb.fit_predict(X)
    idx0 = np.where(cl != -1)[0]  # clustered cells
    idx1 = np.where(cl == -1)[0]  # noise cells
    if idx1.size > 0 and idx0.size > 0:
        # Assign each noise cell the cluster most common among its
        # k nearest clustered cells.
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
        cl[idx1] = np.argmax(nnmc, axis=1)
    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
                             n_genes=4000):
    """
    Ranks marker genes for each cluster using a random forest
    classification approach.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    clusters - int or array-like, default None
        A number or vector corresponding to the specific cluster ID(s)
        for which marker genes will be calculated. If None, marker genes
        will be computed for all clusters.
    n_genes - int, optional, default 4000
        By default, trains the classifier on the top 4000 SAM-weighted
        genes.
    """
    if(labels is None):
        try:
            # Default to the first obs column ending in '_clusters'.
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels
    from sklearn.ensemble import RandomForestClassifier
    markers = {}
    # BUG FIX: 'clusters == None' broadcasts elementwise when
    # 'clusters' is an array (which the docstring allows), making the
    # 'if' raise ValueError. Identity comparison handles every input,
    # and now matches the 'clusters is None' check further below.
    if clusters is None:
        lblsu = np.unique(lbls)
    else:
        lblsu = np.unique(clusters)
    # Train on the top 'n_genes' SAM-weighted genes only.
    indices = np.argsort(-self.adata.var['weights'].values)
    X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
    for K in range(lblsu.size):
        print(K)
        # One-vs-rest target for the current cluster.
        y = np.zeros(lbls.size)
        y[lbls == lblsu[K]] = 1
        clf = RandomForestClassifier(n_estimators=100, max_depth=None,
                                     random_state=0)
        clf.fit(X, y)
        # Rank genes by their importance to the classifier.
        idx = np.argsort(-clf.feature_importances_)
        markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
    if clusters is None:
        self.adata.uns['marker_genes_rf'] = markers
    return markers
def identify_marker_genes_ratio(self, labels=None):
    """
    Ranks marker genes for each cluster using a SAM-weighted
    expression-ratio approach (works quite well).

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    Returns:
    -------
    markers - dict
        Maps each cluster label to all gene names ranked by the
        weighted expression-ratio score (also stored in
        adata.uns['marker_genes_ratio']).
    """
    if(labels is None):
        try:
            # Default to the first obs column ending in '_clusters'.
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels
    all_gene_names = np.array(list(self.adata.var_names))
    markers = {}
    # Total expression per gene across all cells.
    s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
    lblsu = np.unique(lbls)
    for i in lblsu:
        # Total expression per gene within the cluster.
        d = np.array(self.adata.layers['X_disp']
                     [lbls == i, :].sum(0)).flatten()
        rat = np.zeros(d.size)
        # Score = d^2 / s * weight (in-cluster fraction times
        # in-cluster total, scaled by the SAM gene weight).
        rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
            self.adata.var['weights'].values[s > 0]
        x = np.argsort(-rat)
        markers[i] = all_gene_names[x[:]]
    self.adata.uns['marker_genes_ratio'] = markers
    return markers
def identify_marker_genes_corr(self, labels=None, n_genes=4000):
    """
    Ranking marker genes based on their respective magnitudes in the
    correlation dot products with cluster-specific reference expression
    profiles.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    n_genes - int, optional, default 4000
        By default, computes correlations on the top 4000 SAM-weighted genes.

    Returns:
    -------
    markers - dict
        Maps each cluster label to the top-weighted gene names ranked
        by correlation with the cluster's mean profile (also stored in
        adata.uns['marker_genes_corr']).
    """
    if(labels is None):
        try:
            # Default to the first obs column ending in '_clusters'.
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels
    w = self.adata.var['weights'].values
    s = StandardScaler()
    # Standardize the top 'n_genes' weighted genes, rescaled by weight.
    idxg = np.argsort(-w)[:n_genes]
    y1 = s.fit_transform(self.adata.layers['X_disp'][:, idxg].A) * w[idxg]
    all_gene_names = np.array(list(self.adata.var_names))[idxg]
    markers = {}
    lblsu = np.unique(lbls)
    for i in lblsu:
        Gcells = np.array(list(self.adata.obs_names[lbls == i]))
        z1 = y1[np.in1d(self.adata.obs_names, Gcells), :]
        # z-score each cell, then correlate against the cluster's mean
        # (also z-scored) reference profile.
        m1 = (z1 - z1.mean(1)[:, None]) / z1.std(1)[:, None]
        ref = z1.mean(0)
        ref = (ref - ref.mean()) / ref.std()
        g2 = (m1 * ref).mean(0)
        markers[i] = all_gene_names[np.argsort(-g2)]
    self.adata.uns['marker_genes_corr'] = markers
    return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.run_umap | python | def run_umap(self, X=None, metric=None, **kwargs):
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d | Wrapper for umap-learn.
See https://github.com/lmcinnes/umap sklearn for the documentation
and source code. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1028-L1048 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
    """Build the raw and working AnnData objects from 'counts'.

    'counts' may be a (matrix, gene IDs, cell IDs) tuple/list, a
    cells x genes pandas.DataFrame, an AnnData, or None (load data
    later with one of the load functions). 'annotations', if given,
    is stored as the 'annotations' obs column.
    """
    if isinstance(counts, tuple) or isinstance(counts, list):
        raw_data, all_gene_names, all_cell_names = counts
        if isinstance(raw_data, np.ndarray):
            # Keep the expression matrix sparse internally.
            raw_data = sp.csr_matrix(raw_data)
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, pd.DataFrame):
        raw_data = sp.csr_matrix(counts.values)
        all_gene_names = np.array(list(counts.columns.values))
        all_cell_names = np.array(list(counts.index.values))
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, AnnData):
        all_cell_names = np.array(list(counts.obs_names))
        all_gene_names = np.array(list(counts.var_names))
        self.adata_raw = counts
    elif counts is not None:
        raise Exception(
            "\'counts\' must be either a tuple/list of "
            "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
            "cells x genes")
    if(annotations is not None):
        annotations = np.array(list(annotations))
        if counts is not None:
            self.adata_raw.obs['annotations'] = pd.Categorical(annotations)
    if(counts is not None):
        # Deduplicate IDs so AnnData indexing stays unambiguous.
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        # Working copy; 'X_disp' holds the data used for display/stats.
        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
include_genes=None, exclude_genes=None,
include_cells=None, exclude_cells=None,
norm='log', min_expression=1, thresh=0.01,
filter_genes=True):
"""Log-normalizes and filters the expression data.
Parameters
----------
div : float, optional, default 1
The factor by which the gene expression will be divided prior to
log normalization.
downsample : float, optional, default 0
The factor by which to randomly downsample the data. If 0, the
data will not be downsampled.
sum_norm : str or float, optional, default None
If a float, the total number of transcripts in each cell will be
normalized to this value prior to normalization and filtering.
Otherwise, nothing happens. If 'cell_median', each cell is
normalized to have the median total read count per cell. If
'gene_median', each gene is normalized to have the median total
read count per gene.
norm : str, optional, default 'log'
If 'log', log-normalizes the expression data. If 'ftt', applies the
Freeman-Tukey variance-stabilization transformation. If
'multinomial', applies the Pearson-residual transformation (this is
experimental and should only be used for raw, un-normalized UMI
datasets). If None, the data is not normalized.
include_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to keep.
All other genes will be filtered out. Gene names are case-
sensitive.
exclude_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to
exclude. These genes will be filtered out. Gene names are case-
sensitive.
include_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to keep.
All other cells will be filtered out. Cell names are
case-sensitive.
exclude_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to exclude.
Thses cells will be filtered out. Cell names are
case-sensitive.
min_expression : float, optional, default 1
The threshold above which a gene is considered
expressed. Gene expression values less than 'min_expression' are
set to zero.
thresh : float, optional, default 0.2
Keep genes expressed in greater than 'thresh'*100 % of cells and
less than (1-'thresh')*100 % of cells, where a gene is considered
expressed if its expression value exceeds 'min_expression'.
filter_genes : bool, optional, default True
Setting this to False turns off filtering operations aside from
removing genes with zero expression across all cells. Genes passed
in exclude_genes or not passed in include_genes will still be
filtered.
"""
# load data
try:
D= self.adata_raw.X
self.adata = self.adata_raw.copy()
except AttributeError:
print('No data loaded')
# filter cells
cell_names = np.array(list(self.adata_raw.obs_names))
idx_cells = np.arange(D.shape[0])
if(include_cells is not None):
include_cells = np.array(list(include_cells))
idx2 = np.where(np.in1d(cell_names, include_cells))[0]
idx_cells = np.array(list(set(idx2) & set(idx_cells)))
if(exclude_cells is not None):
exclude_cells = np.array(list(exclude_cells))
idx4 = np.where(np.in1d(cell_names, exclude_cells,
invert=True))[0]
idx_cells = np.array(list(set(idx4) & set(idx_cells)))
if downsample > 0:
numcells = int(D.shape[0] / downsample)
rand_ind = np.random.choice(np.arange(D.shape[0]),
size=numcells, replace=False)
idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
else:
numcells = D.shape[0]
mask_cells = np.zeros(D.shape[0], dtype='bool')
mask_cells[idx_cells] = True
self.adata = self.adata_raw[mask_cells,:].copy()
D = self.adata.X
if isinstance(D,np.ndarray):
D=sp.csr_matrix(D,dtype='float32')
else:
D=D.astype('float32')
D.sort_indices()
if(D.getformat() == 'csc'):
D=D.tocsr();
# sum-normalize
if (sum_norm == 'cell_median' and norm != 'multinomial'):
s = D.sum(1).A.flatten()
sum_norm = np.median(s)
D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
elif (sum_norm == 'gene_median' and norm != 'multinomial'):
s = D.sum(0).A.flatten()
sum_norm = np.median(s)
s[s==0]=1
D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
elif sum_norm is not None and norm != 'multinomial':
D = D.multiply(1 / D.sum(1).A.flatten()[:,
None] * sum_norm).tocsr()
# normalize
self.adata.X = D
if norm is None:
D.data[:] = (D.data / div)
elif(norm.lower() == 'log'):
D.data[:] = np.log2(D.data / div + 1)
elif(norm.lower() == 'ftt'):
D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
elif norm.lower() == 'multinomial':
ni = D.sum(1).A.flatten() #cells
pj = (D.sum(0) / D.sum()).A.flatten() #genes
col = D.indices
row=[]
for i in range(D.shape[0]):
row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
row = np.concatenate(row).astype('int32')
mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
mu2 = mu.copy()
mu2.data[:]=mu2.data**2
mu2 = mu2.multiply(1/ni[:,None])
mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
self.adata.X = mu
if sum_norm is None:
sum_norm = np.median(ni)
D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
D.data[:] = np.log2(D.data / div + 1)
else:
D.data[:] = (D.data / div)
# zero-out low-expressed genes
idx = np.where(D.data <= min_expression)[0]
D.data[idx] = 0
# filter genes
gene_names = np.array(list(self.adata.var_names))
idx_genes = np.arange(D.shape[1])
if(include_genes is not None):
include_genes = np.array(list(include_genes))
idx = np.where(np.in1d(gene_names, include_genes))[0]
idx_genes = np.array(list(set(idx) & set(idx_genes)))
if(exclude_genes is not None):
exclude_genes = np.array(list(exclude_genes))
idx3 = np.where(np.in1d(gene_names, exclude_genes,
invert=True))[0]
idx_genes = np.array(list(set(idx3) & set(idx_genes)))
if(filter_genes):
a, ct = np.unique(D.indices, return_counts=True)
c = np.zeros(D.shape[1])
c[a] = ct
keep = np.where(np.logical_and(c / D.shape[0] > thresh,
c / D.shape[0] <= 1 - thresh))[0]
idx_genes = np.array(list(set(keep) & set(idx_genes)))
mask_genes = np.zeros(D.shape[1], dtype='bool')
mask_genes[idx_genes] = True
self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
self.adata.X.eliminate_zeros()
self.adata.var['mask_genes']=mask_genes
if norm == 'multinomial':
self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
self.adata.layers['X_disp'].eliminate_zeros()
else:
self.adata.layers['X_disp'] = self.adata.X
    def load_data(self, filename, transpose=True,
                  save_sparse_file='h5ad', sep=',', **kwargs):
        """Loads the specified data file. The file can be a table of
        read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
        as columns by default. The file can also be a pickle file (output from
        'save_sparse_data') or an h5ad file (output from 'save_anndata').

        This function that loads the file specified by 'filename'.

        Parameters
        ----------
        filename - string
            The path to the tabular raw expression counts file.

        sep - string, optional, default ','
            The delimeter used to read the input data table. By default
            assumes the input table is delimited by commas.

        save_sparse_file - str, optional, default 'h5ad'
            If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
            (the native AnnData file format) to the same folder as the original
            data for faster loading in the future. If 'p', pickles the sparse
            data structure, cell names, and gene names in the same folder as
            the original data for faster loading in the future.

        transpose - bool, optional, default True
            By default, assumes file is (genes x cells). Set this to False if
            the file has dimensions (cells x genes).

        """
        # Dispatch on extension: 'p' = pickle, 'h5ad' = AnnData, else table.
        if filename.split('.')[-1] == 'p':
            raw_data, all_cell_names, all_gene_names = (
                pickle.load(open(filename, 'rb')))

            if(transpose):
                raw_data = raw_data.T
                if raw_data.getformat()=='csc':
                    print("Converting sparse matrix to csr format...")
                    raw_data=raw_data.tocsr()
            # already a fast-loading format; no need to re-save
            save_sparse_file = None
        elif filename.split('.')[-1] != 'h5ad':
            # plain-text table of counts
            df = pd.read_csv(filename, sep=sep, index_col=0)
            if(transpose):
                dataset = df.T
            else:
                dataset = df

            raw_data = sp.csr_matrix(dataset.values)
            all_cell_names = np.array(list(dataset.index.values))
            all_gene_names = np.array(list(dataset.columns.values))

        if filename.split('.')[-1] != 'h5ad':
            # build adata_raw from the parsed matrix and names
            self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                     var={'var_names': all_gene_names})
            # deduplicate repeated identifiers in place
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()

            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = raw_data

        else:
            # native AnnData file; load directly
            self.adata_raw = anndata.read_h5ad(filename, **kwargs)
            self.adata = self.adata_raw.copy()
            if 'X_disp' not in list(self.adata.layers.keys()):
                self.adata.layers['X_disp'] = self.adata.X
            save_sparse_file = None

        # optionally cache a fast-loading copy next to the original file
        if(save_sparse_file == 'p'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_sparse_data(path + new_sparse_file + '_sparse.p')
        elif(save_sparse_file == 'h5ad'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
    def load_annotations(self, aname, sep=','):
        """Loads cell annotations.

        Loads the cell annoations specified by the 'aname' path.

        Parameters
        ----------
        aname - string
            The path to the annotations file. First column should be cell IDs
            and second column should be the desired annotations.

        """
        # First read is a probe to inspect the file's shape; the file may be
        # re-read below with different index/header settings.
        ann = pd.read_csv(aname)

        cell_names = np.array(list(self.adata.obs_names))
        all_cell_names = np.array(list(self.adata_raw.obs_names))

        if(ann.shape[1] > 1):
            # multi-column: assume first column is the cell-ID index
            ann = pd.read_csv(aname, index_col=0, sep=sep)
            if(ann.shape[0] != all_cell_names.size):
                # row count mismatch: retry assuming there is no header row
                ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
        else:
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, header=None, sep=sep)

        # normalize index to fixed-width unicode so lookups by cell name work
        ann.index = np.array(list(ann.index.astype('<U100')))
        # ann1: annotations reordered to match the filtered cells;
        # ann2: annotations for all raw cells in file order
        ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
        ann2 = np.array(list(ann.values.flatten()))
        self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
        self.adata.obs['annotations'] = pd.Categorical(ann1)
    def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
        """Computes the spatial dispersion factors for each gene.

        Parameters
        ----------
        nnm - scipy.sparse, float
            Square cell-to-cell nearest-neighbor matrix.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This ensures
            that outlier genes do not significantly skew the weight
            distribution.

        Returns:
        -------
        weights - ndarray, float
            The vector of gene weights. (Note: the sorted indices previously
            documented here are not returned by this method.)

        """
        # Smooth expression over each cell's neighborhood first.
        self.knn_avg(nnm)
        D_avg = self.adata.layers['X_knn_avg']

        # Fano factor (variance / mean) of the smoothed expression;
        # genes with zero mean get dispersion 0.
        mu, var = sf.mean_variance_axis(D_avg, axis=0)
        dispersions = np.zeros(var.size)
        dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
        self.adata.var['spatial_dispersions'] = dispersions.copy()

        # Clip at the mean of the top 'num_norm_avg' dispersions so a few
        # outlier genes cannot dominate the weight scale.
        ma = np.sort(dispersions)[-num_norm_avg:].mean()
        dispersions[dispersions >= ma] = ma

        # Rescale to (0, 1] and compress with a square root.
        weights = ((dispersions / dispersions.max())**0.5).flatten()

        self.adata.var['weights'] = weights

        return weights
def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
"""Computes the contribution of the gene IDs in 'genes' to each
principal component (PC) of the filtered expression data as the mean of
the absolute value of the corresponding gene loadings. High values
correspond to PCs that are highly correlated with the features in
'genes'. These PCs can then be regressed out of the data using
'regress_genes'.
Parameters
----------
genes - numpy.array or list
Genes for which contribution to each PC will be calculated.
npcs - int, optional, default None
How many PCs to calculate when computing PCA of the filtered and
log-transformed expression data. If None, calculate all PCs.
plot - bool, optional, default False
If True, plot the scores reflecting how correlated each PC is with
genes of interest. Otherwise, do not plot anything.
Returns:
-------
x - numpy.array
Scores reflecting how correlated each PC is with the genes of
interest (ordered by decreasing eigenvalues).
"""
from sklearn.decomposition import PCA
if npcs is None:
npcs = self.adata.X.shape[0]
pca = PCA(n_components=npcs)
pc = pca.fit_transform(self.adata.X.toarray())
self.regression_pca = pca
self.regression_pcs = pc
gene_names = np.array(list(self.adata.var_names))
if(genes is not None):
idx = np.where(np.in1d(gene_names, genes))[0]
sx = pca.components_[:, idx]
x = np.abs(sx).mean(1)
if plot:
plt.figure()
plt.plot(x)
return x
else:
return
def regress_genes(self, PCs):
"""Regress out the principal components in 'PCs' from the filtered
expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
been previously called.
Parameters
----------
PCs - int, numpy.array, list
The principal components to regress out of the expression data.
"""
ind = [PCs]
ind = np.array(ind).flatten()
try:
y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
self.regression_pca.components_[ind, :] * self.adata.var[
'weights'].values)
except BaseException:
y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
self.regression_pca.components_[ind, :])
self.adata.X = sp.csr_matrix(y)
    def run(self,
            max_iter=10,
            verbose=True,
            projection='umap',
            stopping_condition=5e-3,
            num_norm_avg=50,
            k=20,
            distance='correlation',
            preprocessing='Normalizer',
            proj_kwargs={}):
        """Runs the Self-Assembling Manifold algorithm.

        Parameters
        ----------
        k - int, optional, default 20
            The number of nearest neighbors to identify for each cell.

        distance : string, optional, default 'correlation'
            The distance metric to use when constructing cell distance
            matrices. Can be any of the distance metrics supported by
            sklearn's 'pdist'.

        max_iter - int, optional, default 10
            The maximum number of iterations SAM will run.

        stopping_condition - float, optional, default 5e-3
            The stopping condition threshold for the RMSE between gene weights
            in adjacent iterations.

        verbose - bool, optional, default True
            If True, the iteration number and error between gene weights in
            adjacent iterations will be displayed.

        projection - str, optional, default 'umap'
            If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
            embedding. Otherwise, no embedding will be generated.

        preprocessing - str, optional, default 'Normalizer'
            If 'Normalizer', use sklearn.preprocessing.Normalizer, which
            normalizes expression data prior to PCA such that each cell has
            unit L2 norm. If 'StandardScaler', use
            sklearn.preprocessing.StandardScaler, which normalizes expression
            data prior to PCA such that each gene has zero mean and unit
            variance. Otherwise, do not normalize the expression data. We
            recommend using 'StandardScaler' for large datasets and
            'Normalizer' otherwise.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This prevents
            genes with large spatial dispersions from skewing the distribution
            of weights.

        proj_kwargs - dict, optional, default {}
            A dictionary of keyword arguments to pass to the projection
            functions.
        """
        self.distance = distance
        D = self.adata.X
        self.k = k
        # Clamp k to [5, 100] and to at most n_cells - 2.
        if(self.k < 5):
            self.k = 5
        elif(self.k > 100):
            self.k = 100

        if(self.k > D.shape[0] - 1):
            self.k = D.shape[0] - 2

        numcells = D.shape[0]

        # Cap the number of top-weighted genes kept for PCA; larger
        # datasets use fewer genes.
        n_genes = 8000
        if numcells > 3000 and n_genes > 3000:
            n_genes = 3000
        elif numcells > 2000 and n_genes > 4500:
            n_genes = 4500
        elif numcells > 1000 and n_genes > 6000:
            n_genes = 6000
        elif n_genes > 8000:
            n_genes = 8000

        # Number of principal components, likewise scaled by dataset size.
        npcs = None
        if npcs is None and numcells > 3000:
            npcs = 150
        elif npcs is None and numcells > 2000:
            npcs = 250
        elif npcs is None and numcells > 1000:
            npcs = 350
        elif npcs is None:
            npcs = 500

        tinit = time.time()

        # Initialize the nearest-neighbor graph randomly: each cell links
        # to itself plus (k - 1) random cells.
        edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        nums = np.arange(edm.shape[1])
        RINDS = np.random.randint(
            0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                           (self.k - 1)))
        RINDS = np.hstack((nums[:, None], RINDS))

        edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                    (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
        edm = edm.tocsr()

        print('RUNNING SAM')

        # Initial gene weights from the random graph (num_norm_avg=1 so
        # the random start is not over-smoothed).
        W = self.dispersion_ranking_NN(
            edm, num_norm_avg=1)

        old = np.zeros(W.size)
        new = W

        i = 0
        err = ((new - old)**2).mean()**0.5

        if max_iter < 5:
            max_iter = 5

        nnas = num_norm_avg
        # Keep a history of graphs and weights per iteration.
        self.Ns=[edm]
        self.Ws = [W]

        # Alternate: weights -> weighted PCA -> kNN graph -> new weights,
        # until the weight RMSE change drops below the stopping condition.
        while (i < max_iter and err > stopping_condition):
            conv = err
            if(verbose):
                print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))

            i += 1
            old = new

            W, wPCA_data, EDM, = self.calculate_nnm(
                D, W, n_genes, preprocessing, npcs, numcells, nnas)
            new = W
            err = ((new - old)**2).mean()**0.5

            self.Ns.append(EDM)
            self.Ws.append(W)

        # One final pass with the converged weights.
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        self.Ns.append(EDM)

        all_gene_names = np.array(list(self.adata.var_names))
        indices = np.argsort(-W)
        ranked_genes = all_gene_names[indices]

        # Populate correlated gene groups as a side effect.
        self.corr_bin_genes(number_of_features=1000)

        self.adata.uns['ranked_genes'] = ranked_genes
        self.adata.obsm['X_pca'] = wPCA_data
        self.adata.uns['neighbors'] = {}
        self.adata.uns['neighbors']['connectivities'] = EDM

        if(projection == 'tsne'):
            print('Computing the t-SNE embedding...')
            self.run_tsne(**proj_kwargs)
        elif(projection == 'umap'):
            print('Computing the UMAP embedding...')
            self.run_umap(**proj_kwargs)
        elif(projection == 'diff_umap'):
            print('Computing the diffusion UMAP embedding...')
            self.run_diff_umap(**proj_kwargs)

        elapsed = time.time() - tinit

        if verbose:
            print('Elapsed time: ' + str(elapsed) + ' seconds')
    def calculate_nnm(
            self,
            D,
            W,
            n_genes,
            preprocessing,
            npcs,
            numcells,
            num_norm_avg):
        # One SAM iteration: select the top 'n_genes' weighted genes,
        # normalize, run weighted PCA, build the kNN graph, and recompute
        # the gene weights from the new graph.
        if(n_genes is None):
            gkeep = np.arange(W.size)
        else:
            # indices of the top-weighted genes, in ascending order
            gkeep = np.sort(np.argsort(-W)[:n_genes])

        if preprocessing == 'Normalizer':
            # each cell scaled to unit L2 norm
            Ds = D[:, gkeep].toarray()
            Ds = Normalizer().fit_transform(Ds)

        elif preprocessing == 'StandardScaler':
            # each gene standardized, then clipped to +/- 5
            Ds = D[:, gkeep].toarray()
            Ds = StandardScaler(with_mean=True).fit_transform(Ds)
            Ds[Ds > 5] = 5
            Ds[Ds < -5] = -5

        else:
            Ds = D[:, gkeep].toarray()

        # scale genes by their current SAM weights
        D_sub = Ds * (W[gkeep])

        if numcells > 500:
            # 'auto' lets sklearn pick a randomized solver for speed
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='auto')
        else:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='full')

        if self.distance == 'euclidean':
            # unit-normalize so euclidean distance behaves like cosine
            g_weighted = Normalizer().fit_transform(g_weighted)

        self.adata.uns['pca_obj'] = pca
        EDM = self.calc_nnm(g_weighted)

        W = self.dispersion_ranking_NN(
            EDM, num_norm_avg=num_norm_avg)

        self.adata.uns['X_processed'] = D_sub

        return W, g_weighted, EDM
def calc_nnm(self,g_weighted):
numcells=g_weighted.shape[0]
if g_weighted.shape[0] > 8000:
nnm, dists = ut.nearest_neighbors(
g_weighted, n_neighbors=self.k, metric=self.distance)
EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
EDM[np.tile(np.arange(nnm.shape[0])[:, None],
(1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
EDM = EDM.tocsr()
else:
dist = ut.compute_distances(g_weighted, self.distance)
nnm = ut.dist_to_nn(dist, self.k)
EDM = sp.csr_matrix(nnm)
return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
"""Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
geneID_groups = self.adata.uns['gene_groups']
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs)
def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
"""A (hacky) method for binning groups of genes correlated along the
SAM manifold.
Parameters
----------
number_of_features - int, optional, default None
The number of genes to bin. Capped at 5000 due to memory
considerations.
input_gene - str, optional, default None
If not None, use this gene as the first seed when growing the
correlation bins.
"""
weights = self.adata.var['spatial_dispersions'].values
all_gene_names = np.array(list(self.adata.var_names))
D_avg = self.adata.layers['X_knn_avg']
idx2 = np.argsort(-weights)[:weights[weights > 0].size]
if(number_of_features is None or number_of_features > idx2.size):
number_of_features = idx2.size
if number_of_features > 1000:
number_of_features = 1000
if(input_gene is not None):
input_gene = np.where(all_gene_names == input_gene)[0]
if(input_gene.size == 0):
print(
"Gene note found in the filtered dataset. Note "
"that genes are case sensitive.")
return
seeds = [np.array([input_gene])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(all_gene_names[seeds[i]])
return geneID_groups[0]
else:
seeds = [np.array([idx2[0]])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(
all_gene_names[seeds[i]])
self.adata.uns['gene_groups'] = geneID_groups
return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
"""Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' is set to 'precomputed' by default,
implying that this function expects a distance matrix by default.
"""
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d
    def run_diff_umap(self, use_rep='X_pca', metric='euclidean', n_comps=15,
                      method='gauss', **kwargs):
        """
        Experimental -- running UMAP on the diffusion components
        """
        import scanpy.api as sc
        # kNN graph in the chosen representation, using the SAM metric
        sc.pp.neighbors(self.adata, use_rep=use_rep, n_neighbors=self.k,
                        metric=self.distance, method=method)
        # diffusion map, then rebuild neighbors in diffusion space
        sc.tl.diffmap(self.adata, n_comps=n_comps)
        sc.pp.neighbors(self.adata, use_rep='X_diffmap', n_neighbors=self.k,
                        metric='euclidean', method=method)

        # preserve any existing SAM UMAP before scanpy overwrites 'X_umap'
        if 'X_umap' in self.adata.obsm.keys():
            self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']

        sc.tl.umap(self.adata, min_dist=0.1, copy=False)
def knn_avg(self, nnm=None):
if (nnm is None):
nnm = self.adata.uns['neighbors']['connectivities']
D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])
self.adata.layers['X_knn_avg'] = D_avg
    def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
                edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
        """Display a scatter plot.

        Displays a scatter plot using the SAM projection or another input
        projection with or without annotations.

        Parameters
        ----------
        projection - ndarray of floats, optional, default None
            An N x 2 matrix, where N is the number of data points. If None,
            use an existing SAM projection (default t-SNE). Can take on values
            'umap' or 'tsne' to specify either the SAM UMAP embedding or
            SAM t-SNE embedding.

        c - ndarray or str, optional, default None
            Colors for each cell in the scatter plot. Can be a vector of
            floats or strings for cell annotations. Can also be a key
            for sam.adata.obs (i.e. 'louvain_clusters').

        axes - matplotlib axis, optional, default None
            Plot output to the specified, existing axes. If None, create new
            figure window.

        cmap - string, optional, default 'rainbow'
            The colormap to use for the input color values.

        colorbar - bool, optional default True
            If True, display a colorbar indicating which values / annotations
            correspond to which color in the scatter plot.

        Keyword arguments -
            All other keyword arguments that can be passed into
            matplotlib.pyplot.scatter can be used.
        """
        # PLOTTING is a module-level flag set when matplotlib imported OK
        if (not PLOTTING):
            print("matplotlib not installed!")
        else:
            # resolve the 2D coordinates to plot
            if(isinstance(projection, str)):
                # string: a key into adata.obsm (e.g. 'X_umap')
                try:
                    dt = self.adata.obsm[projection]
                except KeyError:
                    print('Please create a projection first using run_umap or'
                          'run_tsne')

            elif(projection is None):
                # fall back to UMAP, then t-SNE
                try:
                    dt = self.adata.obsm['X_umap']
                except KeyError:
                    try:
                        dt = self.adata.obsm['X_tsne']
                    except KeyError:
                        print("Please create either a t-SNE or UMAP projection"
                              "first.")
                        return
            else:
                # assume an N x 2 coordinate array was passed directly
                dt = projection

            if(axes is None):
                plt.figure()
                axes = plt.gca()

            if(c is None):
                # no coloring requested
                plt.scatter(dt[:, 0], dt[:, 1], s=s,
                            linewidth=linewidth, edgecolor=edgecolor, **kwargs)
            else:

                if isinstance(c, str):
                    # try to interpret the string as an adata.obs column;
                    # otherwise pass it through as a matplotlib color
                    try:
                        c = self.adata.obs[c].get_values()
                    except KeyError:
                        0  # do nothing

                if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
                   (isinstance(c, np.ndarray) or isinstance(c, list))):
                    # string labels: map to integer codes for coloring
                    i = ut.convert_annotations(c)
                    ui, ai = np.unique(i, return_index=True)
                    cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)

                    if(colorbar):
                        # tick the colorbar with the original label strings
                        cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                        cbar.ax.set_yticklabels(c[ai])
                else:
                    if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                        # scalar color (e.g. a color name): no colorbar
                        colorbar = False
                    i = c

                    cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)

                    if(colorbar):
                        plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name)
    def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
        # DBSCAN on a 2D embedding (defaults to the stored UMAP projection).
        # Noise points (label -1) are reassigned to the majority cluster
        # among their self.k nearest clustered neighbors.
        from sklearn.cluster import DBSCAN
        if X is None:
            X = self.adata.obsm['X_umap']
            save = True  # write labels back to adata.obs
        else:
            save = False

        cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)

        idx0 = np.where(cl != -1)[0]  # clustered points
        idx1 = np.where(cl == -1)[0]  # noise points
        if idx1.size > 0 and idx0.size > 0:
            # distances from clustered to noise points
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            # vote: count each noise point's neighbors per cluster
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

            cl[idx1] = np.argmax(nnmc, axis=1)

        if save:
            self.adata.obs['density_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def louvain_clustering(self, X=None, res=1, method='modularity'):
        """Runs Louvain clustering using the vtraag implementation. Assumes
        that 'louvain' optional dependency is installed.

        Parameters
        ----------
        res - float, optional, default 1
            The resolution parameter which tunes the number of clusters Louvain
            finds.

        method - str, optional, default 'modularity'
            Can be 'modularity' or 'significance', which are two different
            optimizing funcitons in the Louvain algorithm.

        """
        if X is None:
            X = self.adata.uns['neighbors']['connectivities']
            save = True  # write labels back to adata.obs
        else:
            if not sp.isspmatrix_csr(X):
                X = sp.csr_matrix(X)
            save = False

        import igraph as ig
        import louvain

        # shared-neighbor similarity graph, re-sparsified to k per row
        adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
        sources, targets = adjacency.nonzero()
        weights = adjacency[sources, targets]
        if isinstance(weights, np.matrix):
            weights = weights.A1
        g = ig.Graph(directed=True)
        g.add_vertices(adjacency.shape[0])
        g.add_edges(list(zip(sources, targets)))
        try:
            g.es['weight'] = weights
        except BaseException:
            # fall back to an unweighted graph if edge assignment fails
            pass

        if method == 'significance':
            cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
        else:
            cl = louvain.find_partition(
                g,
                louvain.RBConfigurationVertexPartition,
                resolution_parameter=res)

        if save:
            self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
        else:
            return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
"""Performs k-means clustering.
Parameters
----------
numc - int
Number of clusters
npcs - int, optional, default 15
Number of principal components to use as inpute for k-means
clustering.
"""
from sklearn.cluster import KMeans
if X is None:
D_sub = self.adata.uns['X_processed']
X = (
D_sub -
D_sub.mean(0)).dot(
self.adata.uns['pca_obj'].components_[
:npcs,
:].T)
save = True
else:
save = False
cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
if save:
self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
else:
return cl
    def leiden_clustering(self, X=None, res = 1):
        # Leiden clustering via scanpy; stores integer labels in adata.obs.
        import scanpy.api as sc
        if X is None:
            # use the neighbor graph already stored on adata
            sc.tl.leiden(self.adata, resolution = res,
                         key_added='leiden_clusters')
            self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
                'leiden_clusters'].get_values().astype('int'))
        else:
            # cluster a caller-supplied adjacency; results stored under a
            # separate key so the default labels are not overwritten
            sc.tl.leiden(self.adata, resolution = res, adjacency = X,
                         key_added='leiden_clusters_X')
            self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
                'leiden_clusters_X'].get_values().astype('int'))
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    """Clusters with HDBSCAN, then assigns HDBSCAN's noise points (-1)
    to clusters by majority vote among their k nearest clustered
    neighbors.

    Parameters
    ----------
    X - array-like, optional, default None
        Data to cluster. If None, the processed data is projected onto
        the top 15 PCs, L2-normalized per cell, and the labels are
        written to adata.obs; otherwise labels are returned.

    k - int, optional, default None
        Number of neighbors used when reassigning noise points.
        Defaults to 20 if not provided.

    **kwargs -
        Forwarded to hdbscan.HDBSCAN.
    """
    import hdbscan
    if X is None:
        #X = self.adata.obsm['X_pca']
        D = self.adata.uns['X_processed']
        X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False
    if k is None:
        k = 20#self.k
    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
    cl = hdb.fit_predict(X)
    # idx0: cells HDBSCAN clustered; idx1: cells flagged as noise (-1).
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        # Pairwise distances from clustered cells to noise cells.
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        # Binary membership: each noise cell -> its k nearest clustered
        # cells.
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # Count neighbors per cluster and assign the majority label.
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
        cl[idx1] = np.argmax(nnmc, axis=1)
    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
                             n_genes=4000):
    """
    Ranks marker genes for each cluster using a random forest
    classification approach.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    clusters - int or array-like, default None
        A number or vector corresponding to the specific cluster ID(s)
        for which marker genes will be calculated. If None, marker genes
        will be computed for all clusters.

    n_genes - int, optional, default 4000
        By default, trains the classifier on the top 4000 SAM-weighted
        genes.

    Returns
    -------
    markers - dict
        Maps each cluster ID to its genes ranked by feature importance.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    from sklearn.ensemble import RandomForestClassifier

    markers = {}
    # BUGFIX: was `clusters == None`, which raises ValueError (ambiguous
    # truth value) when `clusters` is an array of IDs; use `is None`.
    if clusters is None:
        lblsu = np.unique(lbls)
    else:
        lblsu = np.unique(clusters)

    # Train only on the top n_genes SAM-weighted genes.
    indices = np.argsort(-self.adata.var['weights'].values)
    X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
    for K in range(lblsu.size):
        print(K)
        # One-vs-rest binary target for the current cluster.
        y = np.zeros(lbls.size)
        y[lbls == lblsu[K]] = 1
        clf = RandomForestClassifier(n_estimators=100, max_depth=None,
                                     random_state=0)
        clf.fit(X, y)
        # Rank genes by classifier feature importance.
        idx = np.argsort(-clf.feature_importances_)
        markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]

    if clusters is None:
        self.adata.uns['marker_genes_rf'] = markers
    return markers
def identify_marker_genes_ratio(self, labels=None):
    """
    Ranks marker genes for each cluster using a SAM-weighted
    expression-ratio approach (works quite well).

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    """
    if labels is None:
        try:
            obs_keys = np.array(list(self.adata.obs_keys()))
            cluster_labels = self.adata.obs[ut.search_string(
                obs_keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        cluster_labels = self.adata.obs[labels].get_values().flatten()
    else:
        cluster_labels = labels

    gene_names = np.array(list(self.adata.var_names))
    expr = self.adata.layers['X_disp']
    weights = self.adata.var['weights'].values
    # Per-gene total expression across all cells; genes never expressed
    # get a score of zero.
    totals = np.array(expr.sum(0)).flatten()
    expressed = totals > 0

    markers = {}
    for lbl in np.unique(cluster_labels):
        in_cluster = np.array(
            expr[cluster_labels == lbl, :].sum(0)).flatten()
        score = np.zeros(in_cluster.size)
        # (within-cluster sum)^2 / total, re-weighted by the SAM gene
        # weights.
        score[expressed] = (in_cluster[expressed] ** 2
                            / totals[expressed] * weights[expressed])
        markers[lbl] = gene_names[np.argsort(-score)]

    self.adata.uns['marker_genes_ratio'] = markers
    return markers
def identify_marker_genes_corr(self, labels=None, n_genes=4000):
    """
    Ranking marker genes based on their respective magnitudes in the
    correlation dot products with cluster-specific reference expression
    profiles.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    n_genes - int, optional, default 4000
        By default, computes correlations on the top 4000 SAM-weighted genes.

    Returns
    -------
    markers - dict
        Maps each cluster ID to its ranked marker genes.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels
    w = self.adata.var['weights'].values
    s = StandardScaler()
    # Keep the top n_genes SAM-weighted genes; z-score each gene across
    # cells and re-scale by its SAM weight.
    idxg = np.argsort(-w)[:n_genes]
    y1 = s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
    all_gene_names = np.array(list(self.adata.var_names))[idxg]
    markers = {}
    lblsu = np.unique(lbls)
    for i in lblsu:
        Gcells = np.array(list(self.adata.obs_names[lbls==i]))
        z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
        # Standardize each cell's profile, build the cluster's mean
        # reference profile, and score each gene by its average
        # contribution to the cell-to-reference correlation.
        m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
        ref = z1.mean(0)
        ref = (ref-ref.mean())/ref.std()
        g2 = (m1*ref).mean(0)
        markers[i] = all_gene_names[np.argsort(-g2)]
    self.adata.uns['marker_genes_corr'] = markers
    return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.run_diff_umap | python | def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
method='gauss', **kwargs):
"""
Experimental -- running UMAP on the diffusion components
"""
import scanpy.api as sc
sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
metric=self.distance,method=method)
sc.tl.diffmap(self.adata, n_comps=n_comps)
sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
metric='euclidean',method=method)
if 'X_umap' in self.adata.obsm.keys():
self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
sc.tl.umap(self.adata,min_dist=0.1,copy=False) | Experimental -- running UMAP on the diffusion components | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1050-L1066 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
    """Builds `adata_raw` (and a working copy `adata`) from the input.

    Parameters
    ----------
    counts - tuple/list (data, gene IDs, cell IDs), pandas.DataFrame
        (cells x genes), or anndata.AnnData, optional, default None
        Preloaded expression data; if None, use a load function later.

    annotations - array-like, optional, default None
        Per-cell annotations stored in adata.obs['annotations'].
    """
    if isinstance(counts, tuple) or isinstance(counts, list):
        raw_data, all_gene_names, all_cell_names = counts
        if isinstance(raw_data, np.ndarray):
            # Densities are always kept sparse internally.
            raw_data = sp.csr_matrix(raw_data)
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, pd.DataFrame):
        raw_data = sp.csr_matrix(counts.values)
        all_gene_names = np.array(list(counts.columns.values))
        all_cell_names = np.array(list(counts.index.values))
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, AnnData):
        all_cell_names = np.array(list(counts.obs_names))
        all_gene_names = np.array(list(counts.var_names))
        self.adata_raw = counts
    elif counts is not None:
        raise Exception(
            "\'counts\' must be either a tuple/list of "
            "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
            "cells x genes")
    if(annotations is not None):
        annotations = np.array(list(annotations))
        if counts is not None:
            self.adata_raw.obs['annotations'] = pd.Categorical(annotations)
    if(counts is not None):
        # Deduplicate IDs so AnnData indexing stays unambiguous.
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                    include_genes=None, exclude_genes=None,
                    include_cells=None, exclude_cells=None,
                    norm='log', min_expression=1, thresh=0.01,
                    filter_genes=True):
    """Log-normalizes and filters the expression data.

    Parameters
    ----------
    div : float, optional, default 1
        The factor by which the gene expression will be divided prior to
        log normalization.

    downsample : float, optional, default 0
        The factor by which to randomly downsample the data. If 0, the
        data will not be downsampled.

    sum_norm : str or float, optional, default None
        If a float, the total number of transcripts in each cell will be
        normalized to this value prior to normalization and filtering.
        Otherwise, nothing happens. If 'cell_median', each cell is
        normalized to have the median total read count per cell. If
        'gene_median', each gene is normalized to have the median total
        read count per gene.

    norm : str, optional, default 'log'
        If 'log', log-normalizes the expression data. If 'ftt', applies the
        Freeman-Tukey variance-stabilization transformation. If
        'multinomial', applies the Pearson-residual transformation (this is
        experimental and should only be used for raw, un-normalized UMI
        datasets). If None, the data is not normalized.

    include_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to keep.
        All other genes will be filtered out. Gene names are case-
        sensitive.

    exclude_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to
        exclude. These genes will be filtered out. Gene names are case-
        sensitive.

    include_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to keep.
        All other cells will be filtered out. Cell names are
        case-sensitive.

    exclude_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to exclude.
        These cells will be filtered out. Cell names are
        case-sensitive.

    min_expression : float, optional, default 1
        The threshold above which a gene is considered
        expressed. Gene expression values less than 'min_expression' are
        set to zero.

    thresh : float, optional, default 0.01
        Keep genes expressed in greater than 'thresh'*100 % of cells and
        less than (1-'thresh')*100 % of cells, where a gene is considered
        expressed if its expression value exceeds 'min_expression'.

    filter_genes : bool, optional, default True
        Setting this to False turns off filtering operations aside from
        removing genes with zero expression across all cells. Genes passed
        in exclude_genes or not passed in include_genes will still be
        filtered.
    """
    # load data
    try:
        D = self.adata_raw.X
        self.adata = self.adata_raw.copy()
    except AttributeError:
        print('No data loaded')
    # filter cells
    cell_names = np.array(list(self.adata_raw.obs_names))
    idx_cells = np.arange(D.shape[0])
    if(include_cells is not None):
        include_cells = np.array(list(include_cells))
        idx2 = np.where(np.in1d(cell_names, include_cells))[0]
        idx_cells = np.array(list(set(idx2) & set(idx_cells)))
    if(exclude_cells is not None):
        exclude_cells = np.array(list(exclude_cells))
        idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                invert=True))[0]
        idx_cells = np.array(list(set(idx4) & set(idx_cells)))
    if downsample > 0:
        # Randomly keep 1/downsample of the cells.
        numcells = int(D.shape[0] / downsample)
        rand_ind = np.random.choice(np.arange(D.shape[0]),
                                    size=numcells, replace=False)
        idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
    else:
        numcells = D.shape[0]
    mask_cells = np.zeros(D.shape[0], dtype='bool')
    mask_cells[idx_cells] = True
    self.adata = self.adata_raw[mask_cells, :].copy()
    D = self.adata.X
    if isinstance(D, np.ndarray):
        D = sp.csr_matrix(D, dtype='float32')
    else:
        D = D.astype('float32')
    D.sort_indices()
    if(D.getformat() == 'csc'):
        D = D.tocsr();
    # sum-normalize
    if (sum_norm == 'cell_median' and norm != 'multinomial'):
        s = D.sum(1).A.flatten()
        sum_norm = np.median(s)
        D = D.multiply(1 / s[:, None] * sum_norm).tocsr()
    elif (sum_norm == 'gene_median' and norm != 'multinomial'):
        s = D.sum(0).A.flatten()
        sum_norm = np.median(s)
        # Avoid division by zero for all-zero genes.
        s[s == 0] = 1
        D = D.multiply(1 / s[None, :] * sum_norm).tocsr()
    elif sum_norm is not None and norm != 'multinomial':
        D = D.multiply(1 / D.sum(1).A.flatten()[:,
                       None] * sum_norm).tocsr()
    # normalize
    self.adata.X = D
    if norm is None:
        D.data[:] = (D.data / div)
    elif(norm.lower() == 'log'):
        D.data[:] = np.log2(D.data / div + 1)
    elif(norm.lower() == 'ftt'):
        # Freeman-Tukey: sqrt(x) + sqrt(x+1).
        D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
    elif norm.lower() == 'multinomial':
        ni = D.sum(1).A.flatten()  # per-cell totals
        pj = (D.sum(0) / D.sum()).A.flatten()  # per-gene proportions
        col = D.indices
        row = []
        # Reconstruct the row index of every stored nonzero from indptr.
        for i in range(D.shape[0]):
            row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
        row = np.concatenate(row).astype('int32')
        # Expected counts under the multinomial model; mu holds the
        # Pearson residuals after the in-place update below.
        mu = sp.coo_matrix((ni[row]*pj[col], (row, col))).tocsr()
        mu2 = mu.copy()
        mu2.data[:] = mu2.data**2
        mu2 = mu2.multiply(1/ni[:, None])
        mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
        self.adata.X = mu
        # D itself is kept as a log-normalized display layer.
        if sum_norm is None:
            sum_norm = np.median(ni)
        D = D.multiply(1 / ni[:, None] * sum_norm).tocsr()
        D.data[:] = np.log2(D.data / div + 1)
    else:
        D.data[:] = (D.data / div)
    # zero-out low-expressed genes
    idx = np.where(D.data <= min_expression)[0]
    D.data[idx] = 0
    # filter genes
    gene_names = np.array(list(self.adata.var_names))
    idx_genes = np.arange(D.shape[1])
    if(include_genes is not None):
        include_genes = np.array(list(include_genes))
        idx = np.where(np.in1d(gene_names, include_genes))[0]
        idx_genes = np.array(list(set(idx) & set(idx_genes)))
    if(exclude_genes is not None):
        exclude_genes = np.array(list(exclude_genes))
        idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                invert=True))[0]
        idx_genes = np.array(list(set(idx3) & set(idx_genes)))
    if(filter_genes):
        # Count cells expressing each gene and keep genes expressed in
        # (thresh, 1-thresh] of cells.
        a, ct = np.unique(D.indices, return_counts=True)
        c = np.zeros(D.shape[1])
        c[a] = ct
        keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                       c / D.shape[0] <= 1 - thresh))[0]
        idx_genes = np.array(list(set(keep) & set(idx_genes)))
    mask_genes = np.zeros(D.shape[1], dtype='bool')
    mask_genes[idx_genes] = True
    # Zero out (rather than drop) filtered genes so shapes stay stable.
    self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
    self.adata.X.eliminate_zeros()
    self.adata.var['mask_genes'] = mask_genes
    if norm == 'multinomial':
        self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
        self.adata.layers['X_disp'].eliminate_zeros()
    else:
        self.adata.layers['X_disp'] = self.adata.X
def load_data(self, filename, transpose=True,
              save_sparse_file='h5ad', sep=',', **kwargs):
    """Loads the specified data file. The file can be a table of
    read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
    as columns by default. The file can also be a pickle file (output from
    'save_sparse_data') or an h5ad file (output from 'save_anndata').

    This function that loads the file specified by 'filename'.

    Parameters
    ----------
    filename - string
        The path to the tabular raw expression counts file.

    sep - string, optional, default ','
        The delimeter used to read the input data table. By default
        assumes the input table is delimited by commas.

    save_sparse_file - str, optional, default 'h5ad'
        If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
        (the native AnnData file format) to the same folder as the original
        data for faster loading in the future. If 'p', pickles the sparse
        data structure, cell names, and gene names in the same folder as
        the original data for faster loading in the future.

    transpose - bool, optional, default True
        By default, assumes file is (genes x cells). Set this to False if
        the file has dimensions (cells x genes).
    """
    if filename.split('.')[-1] == 'p':
        # Pickled (data, cell names, gene names) tuple written by
        # `save_sparse_data` (stored genes x cells in CSC format).
        raw_data, all_cell_names, all_gene_names = (
            pickle.load(open(filename, 'rb')))
        if(transpose):
            raw_data = raw_data.T
            if raw_data.getformat() == 'csc':
                print("Converting sparse matrix to csr format...")
                raw_data = raw_data.tocsr()
        # Already a fast-loading format; no need to re-export.
        save_sparse_file = None
    elif filename.split('.')[-1] != 'h5ad':
        # Plain-text table of counts.
        df = pd.read_csv(filename, sep=sep, index_col=0)
        if(transpose):
            dataset = df.T
        else:
            dataset = df
        raw_data = sp.csr_matrix(dataset.values)
        all_cell_names = np.array(list(dataset.index.values))
        all_gene_names = np.array(list(dataset.columns.values))
    if filename.split('.')[-1] != 'h5ad':
        self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                 var={'var_names': all_gene_names})
        # Deduplicate IDs so AnnData indexing stays unambiguous.
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = raw_data
    else:
        self.adata_raw = anndata.read_h5ad(filename, **kwargs)
        self.adata = self.adata_raw.copy()
        if 'X_disp' not in list(self.adata.layers.keys()):
            self.adata.layers['X_disp'] = self.adata.X
        save_sparse_file = None
    if(save_sparse_file == 'p'):
        # Export a fast-loading pickle next to the original file.
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_sparse_data(path + new_sparse_file + '_sparse.p')
    elif(save_sparse_file == 'h5ad'):
        # Export a fast-loading h5ad next to the original file.
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
    """Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
    Pickle file.

    Parameters
    ----------
    fname - string
        The filename of the output file.
    """
    # Stored transposed (genes x cells) in CSC format; `load_data`
    # transposes it back when reading a '.p' file.
    data = self.adata_raw.X.T
    if data.getformat() == 'csr':
        data = data.tocsc()
    cell_names = np.array(list(self.adata_raw.obs_names))
    gene_names = np.array(list(self.adata_raw.var_names))
    # Use a context manager so the file handle is always closed (the
    # original `open(...)` inside pickle.dump leaked the handle).
    with open(fname, 'wb') as f:
        pickle.dump((data, cell_names, gene_names), f)
def save_anndata(self, fname, data='adata_raw', **kwargs):
    """Saves the AnnData attribute named by `data` ('adata_raw' by
    default) to a .h5ad file (AnnData's native file format).

    Parameters
    ----------
    fname - string
        The filename of the output file.

    data - string, optional, default 'adata_raw'
        Name of the SAM attribute to write (e.g. 'adata_raw' or 'adata').
    """
    self.__dict__[data].write_h5ad(fname, **kwargs)
def load_annotations(self, aname, sep=','):
    """Loads cell annotations.

    Loads the cell annotations specified by the 'aname' path.

    Parameters
    ----------
    aname - string
        The path to the annotations file. First column should be cell IDs
        and second column should be the desired annotations.
    """
    ann = pd.read_csv(aname)
    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))
    if(ann.shape[1] > 1):
        # Multi-column file: first column holds the cell IDs.
        ann = pd.read_csv(aname, index_col=0, sep=sep)
        if(ann.shape[0] != all_cell_names.size):
            # Row-count mismatch: retry assuming there is no header row.
            ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
    else:
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, header=None, sep=sep)
    # Force string index so lookups by cell name work.
    ann.index = np.array(list(ann.index.astype('<U100')))
    # ann1: annotations aligned to the filtered cells; ann2: all cells.
    ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
    ann2 = np.array(list(ann.values.flatten()))
    self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
    self.adata.obs['annotations'] = pd.Categorical(ann1)
def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
    """Computes the spatial dispersion factors for each gene.

    Parameters
    ----------
    nnm - scipy.sparse, float
        Square cell-to-cell nearest-neighbor matrix.

    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This ensures
        that outlier genes do not significantly skew the weight
        distribution.

    Returns:
    -------
    weights - ndarray, float
        The vector of gene weights (also stored in adata.var['weights']).
    """
    # Average expression over each cell's neighborhood first.
    self.knn_avg(nnm)
    D_avg = self.adata.layers['X_knn_avg']
    mu, var = sf.mean_variance_axis(D_avg, axis=0)
    # Fano-factor-style dispersion: var/mean where mean > 0, else 0.
    dispersions = np.zeros(var.size)
    dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
    self.adata.var['spatial_dispersions'] = dispersions.copy()
    # Clip at the mean of the top num_norm_avg dispersions so outliers
    # do not dominate the normalization.
    ma = np.sort(dispersions)[-num_norm_avg:].mean()
    dispersions[dispersions >= ma] = ma
    weights = ((dispersions / dispersions.max())**0.5).flatten()
    self.adata.var['weights'] = weights
    return weights
def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
    """Computes the contribution of the gene IDs in 'genes' to each
    principal component (PC) of the filtered expression data as the mean of
    the absolute value of the corresponding gene loadings. High values
    correspond to PCs that are highly correlated with the features in
    'genes'. These PCs can then be regressed out of the data using
    'regress_genes'.

    Parameters
    ----------
    genes - numpy.array or list
        Genes for which contribution to each PC will be calculated.
        If None, only the PCA is computed and stored; nothing is
        returned.

    npcs - int, optional, default None
        How many PCs to calculate when computing PCA of the filtered and
        log-transformed expression data. If None, calculate all PCs.

    plot - bool, optional, default False
        If True, plot the scores reflecting how correlated each PC is with
        genes of interest. Otherwise, do not plot anything.

    Returns:
    -------
    x - numpy.array
        Scores reflecting how correlated each PC is with the genes of
        interest (ordered by decreasing eigenvalues).
    """
    from sklearn.decomposition import PCA
    if npcs is None:
        npcs = self.adata.X.shape[0]
    pca = PCA(n_components=npcs)
    pc = pca.fit_transform(self.adata.X.toarray())
    # Stash the fit so `regress_genes` can reconstruct components later.
    self.regression_pca = pca
    self.regression_pcs = pc
    gene_names = np.array(list(self.adata.var_names))
    if(genes is not None):
        idx = np.where(np.in1d(gene_names, genes))[0]
        # Mean absolute loading of the selected genes on each PC.
        sx = pca.components_[:, idx]
        x = np.abs(sx).mean(1)
        if plot:
            plt.figure()
            plt.plot(x)
        return x
    else:
        return
def regress_genes(self, PCs):
    """Regress out the principal components in 'PCs' from the filtered
    expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
    been previously called.

    Parameters
    ----------
    PCs - int, numpy.array, list
        The principal components to regress out of the expression data.
    """
    # Normalize the input to a flat index array (accepts a scalar too).
    ind = [PCs]
    ind = np.array(ind).flatten()
    try:
        # Reconstruct the selected PCs with loadings scaled by the SAM
        # gene weights, then subtract that reconstruction.
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :] * self.adata.var[
                'weights'].values)
    except BaseException:
        # Fall back to the unweighted reconstruction — presumably for
        # when 'weights' does not exist yet (SAM not run); broad catch
        # kept from the original. TODO(review): narrow to KeyError.
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :])
    self.adata.X = sp.csr_matrix(y)
def run(self,
        max_iter=10,
        verbose=True,
        projection='umap',
        stopping_condition=5e-3,
        num_norm_avg=50,
        k=20,
        distance='correlation',
        preprocessing='Normalizer',
        proj_kwargs={}):
    """Runs the Self-Assembling Manifold algorithm.

    Parameters
    ----------
    k - int, optional, default 20
        The number of nearest neighbors to identify for each cell.

    distance : string, optional, default 'correlation'
        The distance metric to use when constructing cell distance
        matrices. Can be any of the distance metrics supported by
        sklearn's 'pdist'.

    max_iter - int, optional, default 10
        The maximum number of iterations SAM will run.

    stopping_condition - float, optional, default 5e-3
        The stopping condition threshold for the RMSE between gene weights
        in adjacent iterations.

    verbose - bool, optional, default True
        If True, the iteration number and error between gene weights in
        adjacent iterations will be displayed.

    projection - str, optional, default 'umap'
        If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
        embedding. Otherwise, no embedding will be generated.

    preprocessing - str, optional, default 'Normalizer'
        If 'Normalizer', use sklearn.preprocessing.Normalizer, which
        normalizes expression data prior to PCA such that each cell has
        unit L2 norm. If 'StandardScaler', use
        sklearn.preprocessing.StandardScaler, which normalizes expression
        data prior to PCA such that each gene has zero mean and unit
        variance. Otherwise, do not normalize the expression data. We
        recommend using 'StandardScaler' for large datasets and
        'Normalizer' otherwise.

    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This prevents
        genes with large spatial dispersions from skewing the distribution
        of weights.

    proj_kwargs - dict, optional, default {}
        A dictionary of keyword arguments to pass to the projection
        functions.
    """
    self.distance = distance
    D = self.adata.X
    self.k = k
    # Clamp k to [5, 100] and below the number of cells.
    if(self.k < 5):
        self.k = 5
    elif(self.k > 100):
        self.k = 100
    if(self.k > D.shape[0] - 1):
        self.k = D.shape[0] - 2
    numcells = D.shape[0]
    # Cap the number of genes and PCs based on dataset size.
    n_genes = 8000
    if numcells > 3000 and n_genes > 3000:
        n_genes = 3000
    elif numcells > 2000 and n_genes > 4500:
        n_genes = 4500
    elif numcells > 1000 and n_genes > 6000:
        n_genes = 6000
    elif n_genes > 8000:
        n_genes = 8000
    npcs = None
    if npcs is None and numcells > 3000:
        npcs = 150
    elif npcs is None and numcells > 2000:
        npcs = 250
    elif npcs is None and numcells > 1000:
        npcs = 350
    elif npcs is None:
        npcs = 500
    tinit = time.time()
    # Seed with a random k-NN graph: each cell linked to itself plus
    # k-1 random cells.
    edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
    nums = np.arange(edm.shape[1])
    RINDS = np.random.randint(
        0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                       (self.k - 1)))
    RINDS = np.hstack((nums[:, None], RINDS))
    edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
    edm = edm.tocsr()
    print('RUNNING SAM')
    # Initial gene weights from the random graph.
    W = self.dispersion_ranking_NN(
        edm, num_norm_avg=1)
    old = np.zeros(W.size)
    new = W
    i = 0
    err = ((new - old)**2).mean()**0.5
    if max_iter < 5:
        max_iter = 5
    nnas = num_norm_avg
    # Keep the history of graphs and weights across iterations.
    self.Ns = [edm]
    self.Ws = [W]
    # Iterate until the RMSE between successive weight vectors falls
    # below the stopping condition (or max_iter is hit).
    while (i < max_iter and err > stopping_condition):
        conv = err
        if(verbose):
            print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))
        i += 1
        old = new
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        new = W
        err = ((new - old)**2).mean()**0.5
        self.Ns.append(EDM)
        self.Ws.append(W)
    # One final pass with the converged weights.
    W, wPCA_data, EDM, = self.calculate_nnm(
        D, W, n_genes, preprocessing, npcs, numcells, nnas)
    self.Ns.append(EDM)
    all_gene_names = np.array(list(self.adata.var_names))
    indices = np.argsort(-W)
    ranked_genes = all_gene_names[indices]
    self.corr_bin_genes(number_of_features=1000)
    self.adata.uns['ranked_genes'] = ranked_genes
    self.adata.obsm['X_pca'] = wPCA_data
    self.adata.uns['neighbors'] = {}
    self.adata.uns['neighbors']['connectivities'] = EDM
    if(projection == 'tsne'):
        print('Computing the t-SNE embedding...')
        self.run_tsne(**proj_kwargs)
    elif(projection == 'umap'):
        print('Computing the UMAP embedding...')
        self.run_umap(**proj_kwargs)
    elif(projection == 'diff_umap'):
        print('Computing the diffusion UMAP embedding...')
        self.run_diff_umap(**proj_kwargs)
    elapsed = time.time() - tinit
    if verbose:
        print('Elapsed time: ' + str(elapsed) + ' seconds')
def calculate_nnm(
        self,
        D,
        W,
        n_genes,
        preprocessing,
        npcs,
        numcells,
        num_norm_avg):
    """Performs one SAM iteration: select/scale the top-weighted genes,
    run weighted PCA, rebuild the k-NN graph, and recompute gene
    weights.

    Parameters
    ----------
    D - scipy.sparse matrix
        Filtered expression data (cells x genes).
    W - ndarray
        Current gene weights.
    n_genes - int or None
        Number of top-weighted genes to keep (all genes if None).
    preprocessing - str
        'Normalizer', 'StandardScaler', or anything else for no scaling.
    npcs - int
        Number of principal components.
    numcells - int
        Number of cells (chooses the PCA solver).
    num_norm_avg - int
        Passed through to dispersion_ranking_NN.

    Returns
    -------
    (weights, g_weighted, EDM) - new gene weights, the weighted PCA
    coordinates, and the nearest-neighbor adjacency matrix.
    """
    if(n_genes is None):
        gkeep = np.arange(W.size)
    else:
        # Top n_genes by weight, kept in original column order.
        gkeep = np.sort(np.argsort(-W)[:n_genes])
    if preprocessing == 'Normalizer':
        Ds = D[:, gkeep].toarray()
        Ds = Normalizer().fit_transform(Ds)
    elif preprocessing == 'StandardScaler':
        Ds = D[:, gkeep].toarray()
        Ds = StandardScaler(with_mean=True).fit_transform(Ds)
        # Clip extreme z-scores.
        Ds[Ds > 5] = 5
        Ds[Ds < -5] = -5
    else:
        Ds = D[:, gkeep].toarray()
    # Rescale each gene by its SAM weight before PCA.
    D_sub = Ds * (W[gkeep])
    if numcells > 500:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='auto')
    else:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='full')
    if self.distance == 'euclidean':
        # L2-normalize the PCA coordinates when using euclidean distance.
        g_weighted = Normalizer().fit_transform(g_weighted)
    self.adata.uns['pca_obj'] = pca
    EDM = self.calc_nnm(g_weighted)
    W = self.dispersion_ranking_NN(
        EDM, num_norm_avg=num_norm_avg)
    self.adata.uns['X_processed'] = D_sub
    return W, g_weighted, EDM
def calc_nnm(self, g_weighted):
    """Builds the binary k-nearest-neighbor adjacency matrix (CSR) for
    the weighted PCA coordinates `g_weighted`."""
    n = g_weighted.shape[0]
    if n > 8000:
        # Large datasets: approximate nearest-neighbor search.
        nbrs, _ = ut.nearest_neighbors(
            g_weighted, n_neighbors=self.k, metric=self.distance)
        adj = sp.coo_matrix((n, n), dtype='i').tolil()
        rows = np.repeat(np.arange(nbrs.shape[0]), nbrs.shape[1])
        adj[rows, nbrs.flatten()] = 1
        adj = adj.tocsr()
    else:
        # Small datasets: exact pairwise distances.
        pairwise = ut.compute_distances(g_weighted, self.distance)
        adj = sp.csr_matrix(ut.dist_to_nn(pairwise, self.k))
    return adj
def _create_dict(self, exc):
    """Snapshots the instance __dict__ into `self.pickle_dict`, omitting
    the attribute names listed in `exc`.

    Parameters
    ----------
    exc - list of str or None
        Attribute names to exclude from the snapshot. Missing names are
        ignored.
    """
    self.pickle_dict = self.__dict__.copy()
    if(exc):
        # BUGFIX: the original caught NameError around `del`, but a
        # missing dict key raises KeyError, so absent names crashed.
        # `pop` with a default ignores them, as intended.
        for name in exc:
            self.pickle_dict.pop(name, None)
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
    """Plots orthogonal expression patterns.

    By default, plots the top-ranked gene from each distinct correlated
    group. If a group index is given, plots the top `n_genes` genes
    within that group instead.

    Parameters
    ----------
    group - int, optional, default None
        If specified, display the genes within the desired correlated
        group. Otherwise, display the top ranked gene within each distinct
        correlated group.

    n_genes - int, optional, default 5
        The number of top ranked genes to display within a correlated
        group if 'group' is specified.

    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    gene_groups = self.adata.uns['gene_groups']
    if group is None:
        # One representative (top-ranked) gene per correlated group.
        for members in gene_groups:
            self.show_gene_expression(members[0], **kwargs)
    else:
        for idx in range(n_genes):
            self.show_gene_expression(gene_groups[group][idx], **kwargs)
def plot_correlated_genes(
        self,
        name,
        n_genes=5,
        number_of_features=1000,
        **kwargs):
    """Plots gene expression patterns correlated with the input gene.

    Parameters
    ----------
    name - string
        The name of the gene with respect to which correlated gene
        expression patterns will be displayed.

    n_genes - int, optional, default 5
        The number of top ranked correlated genes to display.

    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    gene_names = np.array(list(self.adata.var_names))
    if not np.any(gene_names == name):
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return
    ranked = self.corr_bin_genes(
        input_gene=name,
        number_of_features=number_of_features)
    # Entry 0 is the query gene itself, so plotting starts at index 1.
    stop = min(n_genes + 1, ranked.size)
    for i in range(1, stop):
        self.show_gene_expression(ranked[i], **kwargs)
    return ranked[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
    """A (hacky) method for binning groups of genes correlated along the
    SAM manifold.

    Parameters
    ----------
    number_of_features - int, optional, default None
        The number of genes to bin. Capped at 1000 due to memory
        considerations (the pairwise correlation matrix is
        number_of_features x number_of_features).
    input_gene - str, optional, default None
        If not None, use this gene as the first seed when growing the
        correlation bins; only the bin containing it is returned.
    """
    weights = self.adata.var['spatial_dispersions'].values
    all_gene_names = np.array(list(self.adata.var_names))
    D_avg = self.adata.layers['X_knn_avg']
    # Genes ranked by spatial dispersion, keeping only positive weights.
    idx2 = np.argsort(-weights)[:weights[weights > 0].size]
    if(number_of_features is None or number_of_features > idx2.size):
        number_of_features = idx2.size
    # Hard memory cap.
    if number_of_features > 1000:
        number_of_features = 1000
    if(input_gene is not None):
        input_gene = np.where(all_gene_names == input_gene)[0]
        if(input_gene.size == 0):
            # NOTE(review): typo in this user-facing message
            # ("note found" -> "not found"); left unchanged here since a
            # doc-only edit must not alter runtime strings.
            print(
                "Gene note found in the filtered dataset. Note "
                "that genes are case sensitive.")
            return
        # Seed the first bin with the query gene.
        # NOTE(review): this seed holds a raw gene index while the
        # else-branch seeds hold indices drawn from idx2, so the
        # idx2 == seeds[j][0] lookups below may come up empty if the
        # query gene is outside the top-ranked set -- TODO confirm.
        seeds = [np.array([input_gene])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            # Baseline: mean positive correlation of gene i.
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            # Attach gene i to the best-matching seed whose
            # representative correlates above the baseline.
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                        > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                # No seed matched: gene i starts its own bin.
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(all_gene_names[seeds[i]])
        # Only the group containing the query gene is returned.
        return geneID_groups[0]
    else:
        # TODO(review): this branch duplicates the one above except for
        # the initial seed; consider extracting a shared helper.
        seeds = [np.array([idx2[0]])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                        > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(
                all_gene_names[seeds[i]])
        # All bins are cached for plot_correlated_groups.
        self.adata.uns['gene_groups'] = geneID_groups
        return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
    """Wrapper for sklearn's t-SNE implementation.

    See sklearn for the t-SNE documentation. All arguments are the same
    with the exception that 'metric' is set to 'precomputed' by default,
    implying that this function expects a distance matrix by default.
    """
    if X is not None:
        # Embed the supplied matrix and hand the coordinates back.
        return man.TSNE(metric=metric, **kwargs).fit_transform(X)
    # Default: embed the stored PCA representation with SAM's metric and
    # cache the 2D coordinates on the AnnData object.
    embedding = man.TSNE(
        metric=self.distance, **kwargs).fit_transform(
        self.adata.obsm['X_pca'])
    self.adata.obsm['X_tsne'] = embedding
def run_umap(self, X=None, metric=None, **kwargs):
    """Wrapper for umap-learn.

    See https://github.com/lmcinnes/umap sklearn for the documentation
    and source code.
    """
    import umap as umap

    if metric is None:
        metric = self.distance

    if X is not None:
        # Embed the supplied matrix and return the coordinates.
        return umap.UMAP(metric=metric, **kwargs).fit_transform(X)

    # Default: embed the stored PCA representation and cache the result.
    reducer = umap.UMAP(metric=metric, **kwargs)
    self.adata.obsm['X_umap'] = reducer.fit_transform(
        self.adata.obsm['X_pca'])
def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
                  method='gauss', **kwargs):
    """
    Experimental -- running UMAP on the diffusion components
    """
    # NOTE(review): the 'metric' and '**kwargs' parameters are accepted
    # but never used below; neighbors are built with self.distance first
    # and then hard-coded 'euclidean' in diffusion space.
    import scanpy.api as sc
    # kNN graph on the chosen representation using SAM's metric.
    sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
                    metric=self.distance,method=method)
    sc.tl.diffmap(self.adata, n_comps=n_comps)
    # Rebuild the graph in diffusion space, then embed with UMAP.
    sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
                    metric='euclidean',method=method)
    # Preserve any existing SAM UMAP embedding before scanpy overwrites
    # adata.obsm['X_umap'].
    if 'X_umap' in self.adata.obsm.keys():
        self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']

    sc.tl.umap(self.adata,min_dist=0.1,copy=False)
def knn_avg(self, nnm=None):
    """Average each cell's expression over its k nearest neighbors.

    Uses the stored neighbor graph when 'nnm' is not given, and writes
    the smoothed matrix to adata.layers['X_knn_avg'].
    """
    if nnm is None:
        nnm = self.adata.uns['neighbors']['connectivities']
    averaged = (nnm / self.k).dot(self.adata.layers['X_disp'])
    self.adata.layers['X_knn_avg'] = averaged
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
            edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
    """Display a scatter plot.

    Displays a scatter plot using the SAM projection or another input
    projection with or without annotations.

    Parameters
    ----------
    projection - ndarray of floats, optional, default None
        An N x 2 matrix, where N is the number of data points. If None,
        use an existing SAM projection (default t-SNE). Can take on values
        'umap' or 'tsne' to specify either the SAM UMAP embedding or
        SAM t-SNE embedding.
    c - ndarray or str, optional, default None
        Colors for each cell in the scatter plot. Can be a vector of
        floats or strings for cell annotations. Can also be a key
        for sam.adata.obs (i.e. 'louvain_clusters').
    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.
    cmap - string, optional, default 'rainbow'
        The colormap to use for the input color values.
    colorbar - bool, optional default True
        If True, display a colorbar indicating which values / annotations
        correspond to which color in the scatter plot.

    Keyword arguments -
        All other keyword arguments that can be passed into
        matplotlib.pyplot.scatter can be used.
    """
    if (not PLOTTING):
        print("matplotlib not installed!")
    else:
        # Resolve the 2D coordinates to plot.
        if(isinstance(projection, str)):
            try:
                dt = self.adata.obsm[projection]
            except KeyError:
                print('Please create a projection first using run_umap or'
                      'run_tsne')
                # NOTE(review): execution falls through with 'dt'
                # unbound here, so the code below raises NameError;
                # a 'return' is probably intended.
        elif(projection is None):
            # Prefer the UMAP embedding, fall back to t-SNE.
            try:
                dt = self.adata.obsm['X_umap']
            except KeyError:
                try:
                    dt = self.adata.obsm['X_tsne']
                except KeyError:
                    print("Please create either a t-SNE or UMAP projection"
                          "first.")
                    return
        else:
            dt = projection

        if(axes is None):
            plt.figure()
            axes = plt.gca()

        if(c is None):
            plt.scatter(dt[:, 0], dt[:, 1], s=s,
                        linewidth=linewidth, edgecolor=edgecolor, **kwargs)
        else:
            # Allow 'c' to name a column of adata.obs; if the key is
            # missing, 'c' is left as the raw string.
            if isinstance(c, str):
                try:
                    c = self.adata.obs[c].get_values()
                except KeyError:
                    0  # do nothing

            if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
               (isinstance(c, np.ndarray) or isinstance(c, list))):
                # Categorical string annotations: map to integer codes
                # and label colorbar ticks with the original strings.
                i = ut.convert_annotations(c)
                ui, ai = np.unique(i, return_index=True)
                cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                    cbar.ax.set_yticklabels(c[ai])
            else:
                # Numeric colors; for scalar/other inputs the colorbar
                # is suppressed.
                if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                    colorbar = False
                i = c

                cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
    """Display a gene's expressions.

    Displays a scatter plot using the SAM projection or another input
    projection with a particular gene's expressions overlaid.

    Parameters
    ----------
    gene - string
        a case-sensitive string indicating the gene expression pattern
        to display.
    avg - bool, optional, default True
        If True, the plots use the k-nearest-neighbor-averaged expression
        values to smooth out noisy expression patterns and improves
        visualization.
    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.
    **kwargs - all keyword arguments in 'SAM.scatter' are eligible.
    """
    all_gene_names = np.array(list(self.adata.var_names))
    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))

    idx = np.where(all_gene_names == gene)[0]
    name = gene
    if(idx.size == 0):
        # BUGFIX: corrected typo in user-facing message
        # ("Gene note found" -> "Gene not found").
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return

    if(avg):
        # Use the kNN-smoothed expression; fall back to the raw
        # log-normalized values when the gene is zero everywhere in the
        # smoothed layer.
        a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
        if a.sum() == 0:
            a = np.log2(self.adata_raw.X[np.in1d(
                all_cell_names, cell_names), :][:,
                idx].toarray().flatten() + 1)
    else:
        a = np.log2(self.adata_raw.X[np.in1d(
            all_cell_names, cell_names), :][:,
            idx].toarray().flatten() + 1)

    if axes is None:
        plt.figure()
        axes = plt.gca()

    self.scatter(c=a, axes=axes, **kwargs)
    axes.set_title(name)
def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
    """Cluster cells with DBSCAN on a 2D embedding.

    Parameters
    ----------
    X - ndarray, optional, default None
        Coordinates to cluster. If None, uses the stored UMAP embedding
        and saves the labels to adata.obs['density_clusters']; otherwise
        the labels are returned.
    eps, metric, **kwargs -
        Passed through to sklearn's DBSCAN.
    """
    from sklearn.cluster import DBSCAN
    if X is None:
        X = self.adata.obsm['X_umap']
        save = True
    else:
        save = False

    cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)

    # DBSCAN labels noise points -1; assign each noise point to the
    # majority cluster among its self.k nearest clustered neighbors.
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
        # One-hot neighbor-membership matrix (noise points x clustered).
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # Votes per cluster from each noise point's neighbors.
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['density_clusters'] = pd.Categorical(cl)
    else:
        return cl
def louvain_clustering(self, X=None, res=1, method='modularity'):
    """Runs Louvain clustering using the vtraag implementation. Assumes
    that 'louvain' optional dependency is installed.

    Parameters
    ----------
    X - scipy.sparse matrix, optional, default None
        Adjacency to cluster. If None, uses the stored neighbor graph
        and saves the labels to adata.obs['louvain_clusters']; otherwise
        the labels are returned.
    res - float, optional, default 1
        The resolution parameter which tunes the number of clusters Louvain
        finds. Only used with method='modularity'.
    method - str, optional, default 'modularity'
        Can be 'modularity' or 'significance', which are two different
        optimizing functions in the Louvain algorithm.
    """
    if X is None:
        X = self.adata.uns['neighbors']['connectivities']
        save = True
    else:
        if not sp.isspmatrix_csr(X):
            X = sp.csr_matrix(X)
        save = False

    import igraph as ig
    import louvain

    # Sharpen the graph via X X^T, then keep only the top k edges per
    # node before building the igraph object.
    adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
    sources, targets = adjacency.nonzero()
    weights = adjacency[sources, targets]
    if isinstance(weights, np.matrix):
        weights = weights.A1
    g = ig.Graph(directed=True)
    g.add_vertices(adjacency.shape[0])
    g.add_edges(list(zip(sources, targets)))
    try:
        g.es['weight'] = weights
    except BaseException:
        # Best effort: proceed unweighted if edge weights can't be set.
        pass

    if method == 'significance':
        cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
    else:
        cl = louvain.find_partition(
            g,
            louvain.RBConfigurationVertexPartition,
            resolution_parameter=res)

    if save:
        self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
    else:
        return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
    """Performs k-means clustering.

    Parameters
    ----------
    numc - int
        Number of clusters
    X - ndarray, optional, default None
        Data to cluster. If None, projects the processed data onto the
        stored PCA loadings and saves labels to
        adata.obs['kmeans_clusters']; otherwise the labels are returned.
    npcs - int, optional, default 15
        Number of principal components to use as input for k-means
        clustering.
    """
    from sklearn.cluster import KMeans
    if X is None:
        # Project the processed data onto the top npcs stored PCA
        # loadings (centering first).
        D_sub = self.adata.uns['X_processed']
        X = (
            D_sub -
            D_sub.mean(0)).dot(
            self.adata.uns['pca_obj'].components_[
                :npcs,
                :].T)
        save = True
    else:
        save = False

    # Rows are L2-normalized before clustering.
    cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))

    if save:
        self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
    else:
        return cl
def leiden_clustering(self, X=None, res = 1):
    """Runs Leiden clustering via scanpy.

    If X is None, clusters the stored neighbor graph and saves integer
    labels to adata.obs['leiden_clusters']; otherwise X is used as the
    adjacency and labels go to adata.obs['leiden_clusters_X'].
    """
    import scanpy.api as sc

    if X is None:
        sc.tl.leiden(self.adata, resolution = res,
                     key_added='leiden_clusters')
        # Convert scanpy's string categories to integer categoricals.
        self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
            'leiden_clusters'].get_values().astype('int'))
    else:
        sc.tl.leiden(self.adata, resolution = res, adjacency = X,
                     key_added='leiden_clusters_X')
        self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
            'leiden_clusters_X'].get_values().astype('int'))
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    """Cluster cells with HDBSCAN, then kNN-assign noise points.

    If X is None, clusters a row-normalized 15-PC projection of the
    processed data and saves labels to adata.obs['hdbknn_clusters'];
    otherwise the labels are returned.
    """
    import hdbscan
    if X is None:
        #X = self.adata.obsm['X_pca']
        # Project the processed data onto the stored PCA loadings
        # (first 15 PCs) and L2-normalize rows.
        D = self.adata.uns['X_processed']
        X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False

    if k is None:
        k = 20#self.k

    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
    cl = hdb.fit_predict(X)

    # Assign HDBSCAN noise points (-1) to the majority cluster among
    # their k nearest clustered neighbors.
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
                             n_genes=4000):
    """
    Ranks marker genes for each cluster using a random forest
    classification approach.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    clusters - int or array-like, default None
        A number or vector corresponding to the specific cluster ID(s)
        for which marker genes will be calculated. If None, marker genes
        will be computed for all clusters.
    n_genes - int, optional, default 4000
        By default, trains the classifier on the top 4000 SAM-weighted
        genes.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    from sklearn.ensemble import RandomForestClassifier

    markers = {}
    # BUGFIX: 'clusters == None' broadcasts elementwise when clusters is
    # array-like (which the docstring allows), raising a truth-value
    # ambiguity error; identity comparison is the correct check.
    if clusters is None:
        lblsu = np.unique(lbls)
    else:
        lblsu = np.unique(clusters)

    # Train on the top n_genes SAM-weighted genes only.
    indices = np.argsort(-self.adata.var['weights'].values)
    X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
    for K in range(lblsu.size):
        # Progress output: one one-vs-rest classifier per cluster.
        print(K)
        y = np.zeros(lbls.size)
        y[lbls == lblsu[K]] = 1
        clf = RandomForestClassifier(n_estimators=100, max_depth=None,
                                     random_state=0)
        clf.fit(X, y)
        # Rank genes by feature importance for this cluster.
        idx = np.argsort(-clf.feature_importances_)
        markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]

    if clusters is None:
        self.adata.uns['marker_genes_rf'] = markers

    return markers
def identify_marker_genes_ratio(self, labels=None):
    """
    Ranks marker genes for each cluster using a SAM-weighted
    expression-ratio approach (works quite well).

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    all_gene_names = np.array(list(self.adata.var_names))

    markers={}
    # Total expression of each gene across all cells.
    s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
    lblsu=np.unique(lbls)
    for i in lblsu:
        # In-cluster expression totals for cluster i.
        d = np.array(self.adata.layers['X_disp']
                     [lbls == i, :].sum(0)).flatten()
        rat = np.zeros(d.size)
        # SAM-weighted enrichment score: (in-cluster sum)^2 / total sum,
        # scaled by the gene weight; zero-total genes remain 0.
        rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
            self.adata.var['weights'].values[s > 0]
        x = np.argsort(-rat)
        markers[i] = all_gene_names[x[:]]

    self.adata.uns['marker_genes_ratio'] = markers

    return markers
def identify_marker_genes_corr(self, labels=None, n_genes=4000):
    """
    Ranking marker genes based on their respective magnitudes in the
    correlation dot products with cluster-specific reference expression
    profiles.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    n_genes - int, optional, default 4000
        By default, computes correlations on the top 4000 SAM-weighted genes.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    w=self.adata.var['weights'].values
    s = StandardScaler()
    # Standardize the top n_genes SAM-weighted genes, then re-scale each
    # gene by its SAM weight.
    idxg = np.argsort(-w)[:n_genes]
    y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]

    all_gene_names = np.array(list(self.adata.var_names))[idxg]
    markers = {}
    lblsu=np.unique(lbls)
    for i in lblsu:
        Gcells = np.array(list(self.adata.obs_names[lbls==i]))
        # Rows of y1 belonging to cluster i.
        z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
        # Per-cell z-scores across genes.
        m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
        # Standardized cluster-mean profile used as the reference.
        ref = z1.mean(0)
        ref = (ref-ref.mean())/ref.std()
        # Mean per-gene contribution to the correlation with the
        # reference; larger means more marker-like.
        g2 = (m1*ref).mean(0)
        markers[i] = all_gene_names[np.argsort(-g2)]

    self.adata.uns['marker_genes_corr'] = markers
    return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.scatter | python | def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
if (not PLOTTING):
print("matplotlib not installed!")
else:
if(isinstance(projection, str)):
try:
dt = self.adata.obsm[projection]
except KeyError:
print('Please create a projection first using run_umap or'
'run_tsne')
elif(projection is None):
try:
dt = self.adata.obsm['X_umap']
except KeyError:
try:
dt = self.adata.obsm['X_tsne']
except KeyError:
print("Please create either a t-SNE or UMAP projection"
"first.")
return
else:
dt = projection
if(axes is None):
plt.figure()
axes = plt.gca()
if(c is None):
plt.scatter(dt[:, 0], dt[:, 1], s=s,
linewidth=linewidth, edgecolor=edgecolor, **kwargs)
else:
if isinstance(c, str):
try:
c = self.adata.obs[c].get_values()
except KeyError:
0 # do nothing
if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
(isinstance(c, np.ndarray) or isinstance(c, list))):
i = ut.convert_annotations(c)
ui, ai = np.unique(i, return_index=True)
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
cbar = plt.colorbar(cax, ax=axes, ticks=ui)
cbar.ax.set_yticklabels(c[ai])
else:
if not (isinstance(c, np.ndarray) or isinstance(c, list)):
colorbar = False
i = c
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
plt.colorbar(cax, ax=axes) | Display a scatter plot.
Displays a scatter plot using the SAM projection or another input
projection with or without annotations.
Parameters
----------
projection - ndarray of floats, optional, default None
An N x 2 matrix, where N is the number of data points. If None,
use an existing SAM projection (default t-SNE). Can take on values
'umap' or 'tsne' to specify either the SAM UMAP embedding or
SAM t-SNE embedding.
c - ndarray or str, optional, default None
Colors for each cell in the scatter plot. Can be a vector of
floats or strings for cell annotations. Can also be a key
for sam.adata.obs (i.e. 'louvain_clusters').
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
cmap - string, optional, default 'rainbow'
The colormap to use for the input color values.
colorbar - bool, optional default True
If True, display a colorbar indicating which values / annotations
correspond to which color in the scatter plot.
Keyword arguments -
All other keyword arguments that can be passed into
matplotlib.pyplot.scatter can be used. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1077-L1175 | [
"def convert_annotations(A):\n x = np.unique(A)\n y = np.zeros(A.size)\n z = 0\n for i in x:\n y[A == i] = z\n z += 1\n\n return y.astype('int')\n"
] | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
    # Accept three input forms: a (matrix, gene IDs, cell IDs)
    # tuple/list, a cells x genes DataFrame, or a prebuilt AnnData.
    if isinstance(counts, tuple) or isinstance(counts, list):
        raw_data, all_gene_names, all_cell_names = counts
        if isinstance(raw_data, np.ndarray):
            # Store sparse internally regardless of input density.
            raw_data = sp.csr_matrix(raw_data)
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, pd.DataFrame):
        raw_data = sp.csr_matrix(counts.values)
        all_gene_names = np.array(list(counts.columns.values))
        all_cell_names = np.array(list(counts.index.values))
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, AnnData):
        all_cell_names=np.array(list(counts.obs_names))
        all_gene_names=np.array(list(counts.var_names))
        self.adata_raw = counts
    elif counts is not None:
        raise Exception(
            "\'counts\' must be either a tuple/list of "
            "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
            "cells x genes")

    if(annotations is not None):
        annotations = np.array(list(annotations))
        if counts is not None:
            self.adata_raw.obs['annotations'] = pd.Categorical(annotations)

    if(counts is not None):
        # Deduplicate IDs so AnnData name-based indexing is unambiguous.
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        # Working copy; 'X_disp' holds the matrix used for dispersion
        # calculations and plotting.
        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
include_genes=None, exclude_genes=None,
include_cells=None, exclude_cells=None,
norm='log', min_expression=1, thresh=0.01,
filter_genes=True):
"""Log-normalizes and filters the expression data.
Parameters
----------
div : float, optional, default 1
The factor by which the gene expression will be divided prior to
log normalization.
downsample : float, optional, default 0
The factor by which to randomly downsample the data. If 0, the
data will not be downsampled.
sum_norm : str or float, optional, default None
If a float, the total number of transcripts in each cell will be
normalized to this value prior to normalization and filtering.
Otherwise, nothing happens. If 'cell_median', each cell is
normalized to have the median total read count per cell. If
'gene_median', each gene is normalized to have the median total
read count per gene.
norm : str, optional, default 'log'
If 'log', log-normalizes the expression data. If 'ftt', applies the
Freeman-Tukey variance-stabilization transformation. If
'multinomial', applies the Pearson-residual transformation (this is
experimental and should only be used for raw, un-normalized UMI
datasets). If None, the data is not normalized.
include_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to keep.
All other genes will be filtered out. Gene names are case-
sensitive.
exclude_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to
exclude. These genes will be filtered out. Gene names are case-
sensitive.
include_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to keep.
All other cells will be filtered out. Cell names are
case-sensitive.
exclude_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to exclude.
Thses cells will be filtered out. Cell names are
case-sensitive.
min_expression : float, optional, default 1
The threshold above which a gene is considered
expressed. Gene expression values less than 'min_expression' are
set to zero.
thresh : float, optional, default 0.2
Keep genes expressed in greater than 'thresh'*100 % of cells and
less than (1-'thresh')*100 % of cells, where a gene is considered
expressed if its expression value exceeds 'min_expression'.
filter_genes : bool, optional, default True
Setting this to False turns off filtering operations aside from
removing genes with zero expression across all cells. Genes passed
in exclude_genes or not passed in include_genes will still be
filtered.
"""
# load data
try:
D= self.adata_raw.X
self.adata = self.adata_raw.copy()
except AttributeError:
print('No data loaded')
# filter cells
cell_names = np.array(list(self.adata_raw.obs_names))
idx_cells = np.arange(D.shape[0])
if(include_cells is not None):
include_cells = np.array(list(include_cells))
idx2 = np.where(np.in1d(cell_names, include_cells))[0]
idx_cells = np.array(list(set(idx2) & set(idx_cells)))
if(exclude_cells is not None):
exclude_cells = np.array(list(exclude_cells))
idx4 = np.where(np.in1d(cell_names, exclude_cells,
invert=True))[0]
idx_cells = np.array(list(set(idx4) & set(idx_cells)))
if downsample > 0:
numcells = int(D.shape[0] / downsample)
rand_ind = np.random.choice(np.arange(D.shape[0]),
size=numcells, replace=False)
idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
else:
numcells = D.shape[0]
mask_cells = np.zeros(D.shape[0], dtype='bool')
mask_cells[idx_cells] = True
self.adata = self.adata_raw[mask_cells,:].copy()
D = self.adata.X
if isinstance(D,np.ndarray):
D=sp.csr_matrix(D,dtype='float32')
else:
D=D.astype('float32')
D.sort_indices()
if(D.getformat() == 'csc'):
D=D.tocsr();
# sum-normalize
if (sum_norm == 'cell_median' and norm != 'multinomial'):
s = D.sum(1).A.flatten()
sum_norm = np.median(s)
D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
elif (sum_norm == 'gene_median' and norm != 'multinomial'):
s = D.sum(0).A.flatten()
sum_norm = np.median(s)
s[s==0]=1
D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
elif sum_norm is not None and norm != 'multinomial':
D = D.multiply(1 / D.sum(1).A.flatten()[:,
None] * sum_norm).tocsr()
# normalize
self.adata.X = D
if norm is None:
D.data[:] = (D.data / div)
elif(norm.lower() == 'log'):
D.data[:] = np.log2(D.data / div + 1)
elif(norm.lower() == 'ftt'):
D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
elif norm.lower() == 'multinomial':
ni = D.sum(1).A.flatten() #cells
pj = (D.sum(0) / D.sum()).A.flatten() #genes
col = D.indices
row=[]
for i in range(D.shape[0]):
row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
row = np.concatenate(row).astype('int32')
mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
mu2 = mu.copy()
mu2.data[:]=mu2.data**2
mu2 = mu2.multiply(1/ni[:,None])
mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
self.adata.X = mu
if sum_norm is None:
sum_norm = np.median(ni)
D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
D.data[:] = np.log2(D.data / div + 1)
else:
D.data[:] = (D.data / div)
# zero-out low-expressed genes
idx = np.where(D.data <= min_expression)[0]
D.data[idx] = 0
# filter genes
gene_names = np.array(list(self.adata.var_names))
idx_genes = np.arange(D.shape[1])
if(include_genes is not None):
include_genes = np.array(list(include_genes))
idx = np.where(np.in1d(gene_names, include_genes))[0]
idx_genes = np.array(list(set(idx) & set(idx_genes)))
if(exclude_genes is not None):
exclude_genes = np.array(list(exclude_genes))
idx3 = np.where(np.in1d(gene_names, exclude_genes,
invert=True))[0]
idx_genes = np.array(list(set(idx3) & set(idx_genes)))
if(filter_genes):
a, ct = np.unique(D.indices, return_counts=True)
c = np.zeros(D.shape[1])
c[a] = ct
keep = np.where(np.logical_and(c / D.shape[0] > thresh,
c / D.shape[0] <= 1 - thresh))[0]
idx_genes = np.array(list(set(keep) & set(idx_genes)))
mask_genes = np.zeros(D.shape[1], dtype='bool')
mask_genes[idx_genes] = True
self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
self.adata.X.eliminate_zeros()
self.adata.var['mask_genes']=mask_genes
if norm == 'multinomial':
self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
self.adata.layers['X_disp'].eliminate_zeros()
else:
self.adata.layers['X_disp'] = self.adata.X
def load_data(self, filename, transpose=True,
save_sparse_file='h5ad', sep=',', **kwargs):
"""Loads the specified data file. The file can be a table of
read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
as columns by default. The file can also be a pickle file (output from
'save_sparse_data') or an h5ad file (output from 'save_anndata').
This function that loads the file specified by 'filename'.
Parameters
----------
filename - string
The path to the tabular raw expression counts file.
sep - string, optional, default ','
The delimeter used to read the input data table. By default
assumes the input table is delimited by commas.
save_sparse_file - str, optional, default 'h5ad'
If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
(the native AnnData file format) to the same folder as the original
data for faster loading in the future. If 'p', pickles the sparse
data structure, cell names, and gene names in the same folder as
the original data for faster loading in the future.
transpose - bool, optional, default True
By default, assumes file is (genes x cells). Set this to False if
the file has dimensions (cells x genes).
"""
if filename.split('.')[-1] == 'p':
raw_data, all_cell_names, all_gene_names = (
pickle.load(open(filename, 'rb')))
if(transpose):
raw_data = raw_data.T
if raw_data.getformat()=='csc':
print("Converting sparse matrix to csr format...")
raw_data=raw_data.tocsr()
save_sparse_file = None
elif filename.split('.')[-1] != 'h5ad':
df = pd.read_csv(filename, sep=sep, index_col=0)
if(transpose):
dataset = df.T
else:
dataset = df
raw_data = sp.csr_matrix(dataset.values)
all_cell_names = np.array(list(dataset.index.values))
all_gene_names = np.array(list(dataset.columns.values))
if filename.split('.')[-1] != 'h5ad':
self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
var={'var_names': all_gene_names})
if(np.unique(all_gene_names).size != all_gene_names.size):
self.adata_raw.var_names_make_unique()
if(np.unique(all_cell_names).size != all_cell_names.size):
self.adata_raw.obs_names_make_unique()
self.adata = self.adata_raw.copy()
self.adata.layers['X_disp'] = raw_data
else:
self.adata_raw = anndata.read_h5ad(filename, **kwargs)
self.adata = self.adata_raw.copy()
if 'X_disp' not in list(self.adata.layers.keys()):
self.adata.layers['X_disp'] = self.adata.X
save_sparse_file = None
if(save_sparse_file == 'p'):
new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
path = filename[:filename.find(filename.split('/')[-1])]
self.save_sparse_data(path + new_sparse_file + '_sparse.p')
elif(save_sparse_file == 'h5ad'):
new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
path = filename[:filename.find(filename.split('/')[-1])]
self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
    """Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
    Pickle file.

    Parameters
    ----------
    fname - string
        The filename of the output file.
    """
    # Stored transposed (genes x cells) in CSC form to match what
    # load_data expects when reading the pickle back.
    data = self.adata_raw.X.T
    if data.getformat() == 'csr':
        data = data.tocsc()
    cell_names = np.array(list(self.adata_raw.obs_names))
    gene_names = np.array(list(self.adata_raw.var_names))
    # BUGFIX: use a context manager so the file handle is closed
    # deterministically (the original leaked it to the GC).
    with open(fname, 'wb') as fh:
        pickle.dump((data, cell_names, gene_names), fh)
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
def load_annotations(self, aname, sep=','):
    """Loads cell annotations.

    Loads the cell annotations specified by the 'aname' path and stores
    them as a Categorical column 'annotations' in both `adata_raw.obs`
    (all cells) and `adata.obs` (filtered cells).

    Parameters
    ----------
    aname - string
        The path to the annotations file. First column should be cell IDs
        and second column should be the desired annotations.
    sep - string, optional, default ','
        Field separator used when re-reading the file.
    """
    # First read is a probe (default separator) just to inspect the shape.
    ann = pd.read_csv(aname)

    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))

    if(ann.shape[1] > 1):
        # More than one column: assume first column is the cell-ID index.
        ann = pd.read_csv(aname, index_col=0, sep=sep)
        # Row-count mismatch suggests the first row was data, not a
        # header; re-read without a header.
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
    else:
        # Single column of annotations (no cell-ID column).
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, header=None, sep=sep)

    # Force a fixed-width unicode index so lookups against the AnnData
    # obs_names (string arrays) match.
    ann.index = np.array(list(ann.index.astype('<U100')))
    # ann1: annotations reordered to the filtered cells; ann2: all cells.
    ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
    ann2 = np.array(list(ann.values.flatten()))
    self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
    self.adata.obs['annotations'] = pd.Categorical(ann1)
def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
    """Computes the spatial dispersion factors for each gene.

    Smooths expression over the nearest-neighbor graph, then scores each
    gene by its Fano factor (variance / mean) on the smoothed data.

    Parameters
    ----------
    nnm - scipy.sparse, float
        Square cell-to-cell nearest-neighbor matrix.

    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This ensures
        that outlier genes do not significantly skew the weight
        distribution.

    Returns:
    -------
    weights - ndarray, float
        The vector of gene weights, in (0, 1], one per gene.
        (Also written to `adata.var['weights']`; the un-clipped
        dispersions are stored in `adata.var['spatial_dispersions']`.)
    """
    # kNN-average the displayed expression; writes adata.layers['X_knn_avg'].
    self.knn_avg(nnm)

    D_avg = self.adata.layers['X_knn_avg']

    mu, var = sf.mean_variance_axis(D_avg, axis=0)

    dispersions = np.zeros(var.size)
    # Fano factor; genes with zero mean keep dispersion 0.
    dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
    self.adata.var['spatial_dispersions'] = dispersions.copy()

    # Clip at the mean of the top 'num_norm_avg' dispersions so a handful
    # of outlier genes cannot dominate the normalization below.
    ma = np.sort(dispersions)[-num_norm_avg:].mean()
    dispersions[dispersions >= ma] = ma

    weights = ((dispersions / dispersions.max())**0.5).flatten()

    self.adata.var['weights'] = weights

    return weights
def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
    """Computes the contribution of the gene IDs in 'genes' to each
    principal component (PC) of the filtered expression data as the mean of
    the absolute value of the corresponding gene loadings. High values
    correspond to PCs that are highly correlated with the features in
    'genes'. These PCs can then be regressed out of the data using
    'regress_genes'.

    Side effects: stores the fitted PCA object in `self.regression_pca`
    and the PC coordinates in `self.regression_pcs` (both consumed by
    'regress_genes').

    Parameters
    ----------
    genes - numpy.array or list
        Genes for which contribution to each PC will be calculated.

    npcs - int, optional, default None
        How many PCs to calculate when computing PCA of the filtered and
        log-transformed expression data. If None, calculate all PCs.

    plot - bool, optional, default False
        If True, plot the scores reflecting how correlated each PC is with
        genes of interest. Otherwise, do not plot anything.

    Returns:
    -------
    x - numpy.array
        Scores reflecting how correlated each PC is with the genes of
        interest (ordered by decreasing eigenvalues). Returns None when
        'genes' is None (only the PCA state is stored in that case).
    """
    from sklearn.decomposition import PCA

    if npcs is None:
        # n_components capped at the number of cells (rows).
        npcs = self.adata.X.shape[0]

    pca = PCA(n_components=npcs)
    pc = pca.fit_transform(self.adata.X.toarray())

    self.regression_pca = pca
    self.regression_pcs = pc

    gene_names = np.array(list(self.adata.var_names))
    if(genes is not None):
        idx = np.where(np.in1d(gene_names, genes))[0]
        # Mean absolute loading of the selected genes on each PC.
        sx = pca.components_[:, idx]
        x = np.abs(sx).mean(1)

        if plot:
            plt.figure()
            plt.plot(x)

        return x
    else:
        return
def regress_genes(self, PCs):
    """Regress out the principal components in 'PCs' from the filtered
    expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
    been previously called.

    Overwrites `adata.X` with the residual expression (as CSR sparse).

    Parameters
    ----------
    PCs - int, numpy.array, list
        The principal components to regress out of the expression data.
    """
    # Normalize scalar / list / array input to a flat index array.
    ind = [PCs]
    ind = np.array(ind).flatten()
    try:
        # Weighted reconstruction: scale loadings by the SAM gene weights.
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :] * self.adata.var[
                'weights'].values)
    except BaseException:
        # NOTE(review): broad fallback — presumably covers the case where
        # 'weights' is absent or mis-shaped; falls back to the unweighted
        # reconstruction. Confirm the intended failure modes.
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :])

    self.adata.X = sp.csr_matrix(y)
def run(self,
        max_iter=10,
        verbose=True,
        projection='umap',
        stopping_condition=5e-3,
        num_norm_avg=50,
        k=20,
        distance='correlation',
        preprocessing='Normalizer',
        proj_kwargs=None):
    """Runs the Self-Assembling Manifold algorithm.

    Parameters
    ----------
    k - int, optional, default 20
        The number of nearest neighbors to identify for each cell.

    distance : string, optional, default 'correlation'
        The distance metric to use when constructing cell distance
        matrices. Can be any of the distance metrics supported by
        sklearn's 'pdist'.

    max_iter - int, optional, default 10
        The maximum number of iterations SAM will run.

    stopping_condition - float, optional, default 5e-3
        The stopping condition threshold for the RMSE between gene weights
        in adjacent iterations.

    verbose - bool, optional, default True
        If True, the iteration number and error between gene weights in
        adjacent iterations will be displayed.

    projection - str, optional, default 'umap'
        If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
        embedding. Otherwise, no embedding will be generated.

    preprocessing - str, optional, default 'Normalizer'
        If 'Normalizer', use sklearn.preprocessing.Normalizer, which
        normalizes expression data prior to PCA such that each cell has
        unit L2 norm. If 'StandardScaler', use
        sklearn.preprocessing.StandardScaler, which normalizes expression
        data prior to PCA such that each gene has zero mean and unit
        variance. Otherwise, do not normalize the expression data. We
        recommend using 'StandardScaler' for large datasets and
        'Normalizer' otherwise.

    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This prevents
        genes with large spatial dispersions from skewing the distribution
        of weights.

    proj_kwargs - dict, optional, default None
        A dictionary of keyword arguments to pass to the projection
        functions. If None, an empty dict is used.
    """
    # BUG FIX: 'proj_kwargs={}' was a mutable default argument; use a
    # None sentinel instead (behavior is otherwise unchanged).
    if proj_kwargs is None:
        proj_kwargs = {}

    self.distance = distance
    D = self.adata.X
    # Clamp k to [5, 100] and to the dataset size.
    self.k = k
    if(self.k < 5):
        self.k = 5
    elif(self.k > 100):
        self.k = 100

    if(self.k > D.shape[0] - 1):
        self.k = D.shape[0] - 2

    numcells = D.shape[0]

    # Cap the number of genes used per iteration; larger datasets use
    # fewer genes to bound memory/compute.
    n_genes = 8000
    if numcells > 3000 and n_genes > 3000:
        n_genes = 3000
    elif numcells > 2000 and n_genes > 4500:
        n_genes = 4500
    elif numcells > 1000 and n_genes > 6000:
        n_genes = 6000
    elif n_genes > 8000:
        n_genes = 8000

    # Pick the number of PCs based on dataset size.
    npcs = None
    if npcs is None and numcells > 3000:
        npcs = 150
    elif npcs is None and numcells > 2000:
        npcs = 250
    elif npcs is None and numcells > 1000:
        npcs = 350
    elif npcs is None:
        npcs = 500

    tinit = time.time()

    # Initialize with a random kNN graph: each cell is linked to itself
    # plus k-1 random cells.
    edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
    nums = np.arange(edm.shape[1])
    RINDS = np.random.randint(
        0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                       (self.k - 1)))
    RINDS = np.hstack((nums[:, None], RINDS))

    edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
    edm = edm.tocsr()

    print('RUNNING SAM')

    # Initial gene weights from the random graph (num_norm_avg=1 so the
    # first pass is essentially unnormalized).
    W = self.dispersion_ranking_NN(
        edm, num_norm_avg=1)

    old = np.zeros(W.size)
    new = W

    i = 0
    err = ((new - old)**2).mean()**0.5

    if max_iter < 5:
        max_iter = 5

    nnas = num_norm_avg
    self.Ns=[edm]
    self.Ws = [W]

    # Iterate: reweight genes -> weighted PCA -> rebuild kNN graph ->
    # recompute weights, until the weight RMSE converges.
    while (i < max_iter and err > stopping_condition):

        conv = err
        if(verbose):
            print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))

        i += 1
        old = new

        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        new = W
        err = ((new - old)**2).mean()**0.5

        self.Ns.append(EDM)
        self.Ws.append(W)

    # One final pass with the converged weights.
    W, wPCA_data, EDM, = self.calculate_nnm(
        D, W, n_genes, preprocessing, npcs, numcells, nnas)
    self.Ns.append(EDM)

    all_gene_names = np.array(list(self.adata.var_names))
    indices = np.argsort(-W)
    ranked_genes = all_gene_names[indices]

    self.corr_bin_genes(number_of_features=1000)

    self.adata.uns['ranked_genes'] = ranked_genes

    self.adata.obsm['X_pca'] = wPCA_data

    self.adata.uns['neighbors'] = {}
    self.adata.uns['neighbors']['connectivities'] = EDM

    if(projection == 'tsne'):
        print('Computing the t-SNE embedding...')
        self.run_tsne(**proj_kwargs)
    elif(projection == 'umap'):
        print('Computing the UMAP embedding...')
        self.run_umap(**proj_kwargs)
    elif(projection == 'diff_umap'):
        print('Computing the diffusion UMAP embedding...')
        self.run_diff_umap(**proj_kwargs)

    elapsed = time.time() - tinit

    if verbose:
        print('Elapsed time: ' + str(elapsed) + ' seconds')
def calculate_nnm(
        self,
        D,
        W,
        n_genes,
        preprocessing,
        npcs,
        numcells,
        num_norm_avg):
    """One SAM iteration: select/scale the top-weighted genes, run a
    weighted PCA, rebuild the kNN graph, and recompute gene weights.

    Parameters
    ----------
    D - scipy.sparse matrix
        Filtered (cells x genes) expression matrix.
    W - ndarray
        Current gene weights (one per gene).
    n_genes - int or None
        Number of top-weighted genes to keep; all genes if None.
    preprocessing - str
        'Normalizer', 'StandardScaler', or anything else for no scaling.
    npcs - int
        Number of principal components for the weighted PCA.
    numcells - int
        Number of cells in D.
    num_norm_avg - int
        Normalization size passed to dispersion_ranking_NN.

    Returns
    -------
    (W, g_weighted, EDM) - the updated gene weights, the weighted PCA
    cell coordinates, and the new nearest-neighbor adjacency matrix.
    Also stores the PCA object in `adata.uns['pca_obj']` and the scaled
    expression in `adata.uns['X_processed']`.
    """
    if(n_genes is None):
        gkeep = np.arange(W.size)
    else:
        # Keep the top n_genes by weight, in original column order.
        gkeep = np.sort(np.argsort(-W)[:n_genes])

    if preprocessing == 'Normalizer':
        Ds = D[:, gkeep].toarray()
        Ds = Normalizer().fit_transform(Ds)

    elif preprocessing == 'StandardScaler':
        Ds = D[:, gkeep].toarray()
        Ds = StandardScaler(with_mean=True).fit_transform(Ds)
        # Clamp extreme z-scores.
        Ds[Ds > 5] = 5
        Ds[Ds < -5] = -5

    else:
        Ds = D[:, gkeep].toarray()

    # Rescale each kept gene by its current SAM weight.
    D_sub = Ds * (W[gkeep])

    # Full SVD solver only for small datasets; 'auto' otherwise.
    if numcells > 500:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='auto')
    else:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='full')

    if self.distance == 'euclidean':
        # Unit-normalize rows so euclidean distances behave like cosine.
        g_weighted = Normalizer().fit_transform(g_weighted)

    self.adata.uns['pca_obj'] = pca
    EDM = self.calc_nnm(g_weighted)

    W = self.dispersion_ranking_NN(
        EDM, num_norm_avg=num_norm_avg)

    self.adata.uns['X_processed'] = D_sub

    return W, g_weighted, EDM
def calc_nnm(self, g_weighted):
    """Build the cell-to-cell kNN adjacency matrix (CSR, 0/1 entries)
    from the weighted PCA coordinates, using approximate nearest
    neighbors for large datasets and an exact distance matrix otherwise.
    """
    numcells = g_weighted.shape[0]
    if g_weighted.shape[0] > 8000:
        # Approximate kNN; scatter ones at (cell, neighbor) positions.
        nnm, dists = ut.nearest_neighbors(
            g_weighted, n_neighbors=self.k, metric=self.distance)
        row_idx = np.repeat(np.arange(nnm.shape[0]), nnm.shape[1])
        EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        EDM[row_idx, nnm.flatten()] = 1
        EDM = EDM.tocsr()
    else:
        # Exact pairwise distances, thresholded to the k nearest.
        dist = ut.compute_distances(g_weighted, self.distance)
        EDM = sp.csr_matrix(ut.dist_to_nn(dist, self.k))
    return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
    """Plots orthogonal expression patterns.

    With no arguments, shows the top-ranked gene of each correlated gene
    group found by `corr_bin_genes`. If `group` is given, shows the top
    `n_genes` genes within that single group.

    Parameters
    ----------
    group - int, optional, default None
        Index of the correlated group to display. If None, the first
        gene of every group is displayed instead.

    n_genes - int, optional, default 5
        How many top-ranked genes of the chosen group to display (only
        used when `group` is specified).

    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    geneID_groups = self.adata.uns['gene_groups']
    if group is None:
        # One representative (top-ranked) gene per group.
        targets = [grp[0] for grp in geneID_groups]
    else:
        targets = [geneID_groups[group][i] for i in range(n_genes)]
    for gene in targets:
        self.show_gene_expression(gene, **kwargs)
def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
"""A (hacky) method for binning groups of genes correlated along the
SAM manifold.
Parameters
----------
number_of_features - int, optional, default None
The number of genes to bin. Capped at 5000 due to memory
considerations.
input_gene - str, optional, default None
If not None, use this gene as the first seed when growing the
correlation bins.
"""
weights = self.adata.var['spatial_dispersions'].values
all_gene_names = np.array(list(self.adata.var_names))
D_avg = self.adata.layers['X_knn_avg']
idx2 = np.argsort(-weights)[:weights[weights > 0].size]
if(number_of_features is None or number_of_features > idx2.size):
number_of_features = idx2.size
if number_of_features > 1000:
number_of_features = 1000
if(input_gene is not None):
input_gene = np.where(all_gene_names == input_gene)[0]
if(input_gene.size == 0):
print(
"Gene note found in the filtered dataset. Note "
"that genes are case sensitive.")
return
seeds = [np.array([input_gene])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(all_gene_names[seeds[i]])
return geneID_groups[0]
else:
seeds = [np.array([idx2[0]])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(
all_gene_names[seeds[i]])
self.adata.uns['gene_groups'] = geneID_groups
return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
    """Wrapper for sklearn's t-SNE implementation.

    If `X` is given, embeds it with the supplied `metric` and returns the
    coordinates. Otherwise, embeds `adata.obsm['X_pca']` using the SAM
    distance metric and stores the result in `adata.obsm['X_tsne']`.
    See sklearn for the t-SNE documentation.
    """
    if X is not None:
        return man.TSNE(metric=metric, **kwargs).fit_transform(X)
    embedding = man.TSNE(
        metric=self.distance, **kwargs).fit_transform(self.adata.obsm['X_pca'])
    self.adata.obsm['X_tsne'] = embedding
def run_umap(self, X=None, metric=None, **kwargs):
    """Wrapper for umap-learn.

    If `X` is given, embeds it and returns the coordinates. Otherwise,
    embeds `adata.obsm['X_pca']` and stores the result in
    `adata.obsm['X_umap']`. When `metric` is None, the SAM distance
    metric is used. See https://github.com/lmcinnes/umap for the
    documentation and source code.
    """
    import umap as umap

    if metric is None:
        metric = self.distance

    reducer = umap.UMAP(metric=metric, **kwargs)
    if X is not None:
        return reducer.fit_transform(X)
    self.adata.obsm['X_umap'] = reducer.fit_transform(
        self.adata.obsm['X_pca'])
def run_diff_umap(self, use_rep='X_pca', metric='euclidean', n_comps=15,
                  method='gauss', **kwargs):
    """
    Experimental -- running UMAP on the diffusion components

    Builds a scanpy neighbor graph on `use_rep`, computes a diffusion
    map, rebuilds neighbors in diffusion space, and runs scanpy's UMAP.
    Overwrites `adata.obsm['X_umap']`; any existing SAM UMAP embedding
    is preserved in `adata.obsm['X_umap_sam']`.
    """
    # NOTE(review): 'scanpy.api' is deprecated in recent scanpy releases
    # (plain 'import scanpy as sc') — confirm the supported version.
    import scanpy.api as sc
    # Neighbors on the original representation, using the SAM metric.
    sc.pp.neighbors(self.adata, use_rep=use_rep, n_neighbors=self.k,
                    metric=self.distance, method=method)

    sc.tl.diffmap(self.adata, n_comps=n_comps)
    # Rebuild the graph in diffusion-component space.
    sc.pp.neighbors(self.adata, use_rep='X_diffmap', n_neighbors=self.k,
                    metric='euclidean', method=method)

    # Keep the SAM UMAP around before scanpy overwrites 'X_umap'.
    if 'X_umap' in self.adata.obsm.keys():
        self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']

    sc.tl.umap(self.adata, min_dist=0.1, copy=False)
def knn_avg(self, nnm=None):
if (nnm is None):
nnm = self.adata.uns['neighbors']['connectivities']
D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])
self.adata.layers['X_knn_avg'] = D_avg
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name)
def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
    """Cluster cells with DBSCAN, then assign each noise point to the
    cluster that dominates among its k nearest clustered neighbors.

    Parameters
    ----------
    X - numpy.ndarray, optional, default None
        Coordinates to cluster. If None, the 2D UMAP embedding is used
        and labels are stored in `adata.obs['density_clusters']`.
    eps - float, optional, default 1
        DBSCAN neighborhood radius.
    metric - str, optional, default 'euclidean'
        Distance metric passed to DBSCAN.
    **kwargs - forwarded to sklearn.cluster.DBSCAN.

    Returns
    -------
    Cluster labels when `X` was provided; otherwise None (labels are
    stored on the AnnData object instead).
    """
    from sklearn.cluster import DBSCAN
    if X is None:
        X = self.adata.obsm['X_umap']
        save = True
    else:
        save = False

    cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)

    # DBSCAN marks noise as -1; idx0 = clustered cells, idx1 = noise.
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        # Distances from each clustered cell to each noise cell.
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # Vote: count each noise cell's neighbors per cluster.
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['density_clusters'] = pd.Categorical(cl)
    else:
        return cl
def louvain_clustering(self, X=None, res=1, method='modularity'):
    """Runs Louvain clustering using the vtraag implementation. Assumes
    that 'louvain' optional dependency is installed.

    Parameters
    ----------
    X - scipy.sparse matrix, optional, default None
        Adjacency/connectivity matrix to cluster. If None, the stored
        SAM neighbor graph is used and labels are written to
        `adata.obs['louvain_clusters']`.

    res - float, optional, default 1
        The resolution parameter which tunes the number of clusters Louvain
        finds.

    method - str, optional, default 'modularity'
        Can be 'modularity' or 'significance', which are two different
        optimizing funcitons in the Louvain algorithm.

    Returns
    -------
    Cluster membership array when `X` was provided; otherwise None.
    """
    if X is None:
        X = self.adata.uns['neighbors']['connectivities']
        save = True
    else:
        if not sp.isspmatrix_csr(X):
            X = sp.csr_matrix(X)
        save = False

    import igraph as ig
    import louvain

    # Symmetrize/share neighbors (X.X^T), then re-sparsify to k per row
    # via the module-level sparse_knn helper.
    adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()

    # Build a weighted directed igraph from the nonzero entries.
    sources, targets = adjacency.nonzero()
    weights = adjacency[sources, targets]
    if isinstance(weights, np.matrix):
        weights = weights.A1
    g = ig.Graph(directed=True)
    g.add_vertices(adjacency.shape[0])
    g.add_edges(list(zip(sources, targets)))
    try:
        g.es['weight'] = weights
    except BaseException:
        # Fall back to an unweighted graph if edge weights can't be set.
        pass

    if method == 'significance':
        cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
    else:
        cl = louvain.find_partition(
            g,
            louvain.RBConfigurationVertexPartition,
            resolution_parameter=res)

    if save:
        self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
    else:
        return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
    """Performs k-means clustering.

    Parameters
    ----------
    numc - int
        Number of clusters

    X - numpy.ndarray, optional, default None
        Coordinates to cluster. If None, the SAM-processed expression is
        projected onto the top `npcs` principal components and the labels
        are stored in `adata.obs['kmeans_clusters']`.

    npcs - int, optional, default 15
        Number of principal components to use as input for k-means
        clustering.

    Returns
    -------
    Cluster labels when `X` was provided; otherwise None.
    """
    from sklearn.cluster import KMeans
    if X is None:
        D_sub = self.adata.uns['X_processed']
        # Mean-center and project onto the stored PCA basis.
        X = (
            D_sub -
            D_sub.mean(0)).dot(
            self.adata.uns['pca_obj'].components_[
                :npcs,
                :].T)
        save = True
    else:
        save = False

    # L2-normalize rows before clustering.
    cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))

    if save:
        self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
    else:
        return cl
def leiden_clustering(self, X=None, res = 1):
    """Runs Leiden clustering via scanpy on the stored neighbor graph
    (or on the supplied adjacency `X`), storing integer labels in
    `adata.obs['leiden_clusters']` (or 'leiden_clusters_X').

    Parameters
    ----------
    X - adjacency matrix, optional, default None
        If given, passed to scanpy as the graph to cluster.
    res - float, optional, default 1
        Leiden resolution parameter.
    """
    # NOTE(review): 'scanpy.api' and Series.get_values() are deprecated /
    # removed in newer scanpy/pandas releases — confirm pinned versions.
    import scanpy.api as sc
    if X is None:
        sc.tl.leiden(self.adata, resolution = res,
                     key_added='leiden_clusters')
        # Convert the categorical string labels to integer categories.
        self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
            'leiden_clusters'].get_values().astype('int'))
    else:
        sc.tl.leiden(self.adata, resolution = res, adjacency = X,
                     key_added='leiden_clusters_X')
        self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
            'leiden_clusters_X'].get_values().astype('int'))
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    """Cluster cells with HDBSCAN, then assign each noise point to the
    cluster that dominates among its k nearest clustered neighbors.

    Parameters
    ----------
    X - numpy.ndarray, optional, default None
        Coordinates to cluster. If None, the processed expression is
        projected onto the first 15 PCs, row-normalized, and the labels
        are stored in `adata.obs['hdbknn_clusters']`.
    k - int, optional, default None
        Number of neighbors used when reassigning noise points
        (defaults to 20).
    **kwargs - forwarded to hdbscan.HDBSCAN.

    Returns
    -------
    Cluster labels when `X` was provided; otherwise None.
    """
    import hdbscan
    if X is None:
        D = self.adata.uns['X_processed']
        X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False

    if k is None:
        k = 20#NOTE(review): hard-coded; self.k was presumably intended — confirm

    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)

    cl = hdb.fit_predict(X)

    # HDBSCAN marks noise as -1; reassign noise by majority vote among
    # each noise cell's k nearest clustered neighbors.
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
                             n_genes=4000):
    """
    Ranks marker genes for each cluster using a random forest
    classification approach.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    clusters - int or array-like, default None
        A number or vector corresponding to the specific cluster ID(s)
        for which marker genes will be calculated. If None, marker genes
        will be computed for all clusters.

    n_genes - int, optional, default 4000
        By default, trains the classifier on the top 4000 SAM-weighted
        genes.

    Returns
    -------
    markers - dict
        Maps each cluster label to its genes ranked by random-forest
        feature importance. Also stored in
        `adata.uns['marker_genes_rf']` when all clusters are computed.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    from sklearn.ensemble import RandomForestClassifier

    markers = {}
    # BUG FIX: 'clusters == None' performs an elementwise comparison
    # when 'clusters' is an array (raising "truth value is ambiguous");
    # identity comparison is the correct test, matching the check below.
    if clusters is None:
        lblsu = np.unique(lbls)
    else:
        lblsu = np.unique(clusters)

    # Train on the top n_genes SAM-weighted genes only.
    indices = np.argsort(-self.adata.var['weights'].values)
    X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
    for K in range(lblsu.size):
        print(K)
        # One-vs-rest binary target for this cluster.
        y = np.zeros(lbls.size)

        y[lbls == lblsu[K]] = 1

        clf = RandomForestClassifier(n_estimators=100, max_depth=None,
                                     random_state=0)

        clf.fit(X, y)

        idx = np.argsort(-clf.feature_importances_)

        markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]

    if clusters is None:
        self.adata.uns['marker_genes_rf'] = markers

    return markers
def identify_marker_genes_ratio(self, labels=None):
    """
    Ranks marker genes for each cluster using a SAM-weighted
    expression-ratio approach (works quite well).

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    Returns
    -------
    markers - dict
        Maps each cluster label to all gene names ranked by the weighted
        expression-ratio score. Also stored in
        `adata.uns['marker_genes_ratio']`.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    all_gene_names = np.array(list(self.adata.var_names))

    markers={}
    # s: total expression of each gene over all cells.
    s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
    lblsu=np.unique(lbls)
    for i in lblsu:
        # d: total expression of each gene within cluster i.
        d = np.array(self.adata.layers['X_disp']
                     [lbls == i, :].sum(0)).flatten()
        rat = np.zeros(d.size)
        # Score = (in-cluster fraction * in-cluster total) * SAM weight.
        rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
            self.adata.var['weights'].values[s > 0]
        x = np.argsort(-rat)
        markers[i] = all_gene_names[x[:]]

    self.adata.uns['marker_genes_ratio'] = markers

    return markers
def identify_marker_genes_corr(self, labels=None, n_genes=4000):
    """
    Ranking marker genes based on their respective magnitudes in the
    correlation dot products with cluster-specific reference expression
    profiles.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    n_genes - int, optional, default 4000
        By default, computes correlations on the top 4000 SAM-weighted genes.

    Returns
    -------
    markers - dict
        Maps each cluster label to the considered gene names ranked by
        correlation with the cluster's reference profile. Also stored in
        `adata.uns['marker_genes_corr']`.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    w=self.adata.var['weights'].values
    s = StandardScaler()
    # Top n_genes by SAM weight; z-score per gene, then reweight.
    idxg = np.argsort(-w)[:n_genes]
    y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]

    all_gene_names = np.array(list(self.adata.var_names))[idxg]
    markers = {}
    lblsu=np.unique(lbls)
    for i in lblsu:
        Gcells = np.array(list(self.adata.obs_names[lbls==i]))
        # z1: weighted expression restricted to this cluster's cells.
        z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
        # Standardize each cell's profile and the cluster mean profile,
        # then score genes by the mean per-cell correlation contribution.
        m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
        ref = z1.mean(0)
        ref = (ref-ref.mean())/ref.std()
        g2 = (m1*ref).mean(0)
        markers[i] = all_gene_names[np.argsort(-g2)]

    self.adata.uns['marker_genes_corr'] = markers
    return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.show_gene_expression | python | def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name) | Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1177-L1231 | [
"def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,\n edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):\n \"\"\"Display a scatter plot.\n\n Displays a scatter plot using the SAM projection or another input\n projection with or without annotations.\n\n Parameters... | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
    """Build `adata_raw` (unfiltered input) and `adata` (working copy)
    from the supplied counts; see the class docstring for accepted
    formats of `counts` and `annotations`.
    """
    if isinstance(counts, tuple) or isinstance(counts, list):
        # Tuple/list input: (matrix, gene IDs, cell IDs).
        raw_data, all_gene_names, all_cell_names = counts
        if isinstance(raw_data, np.ndarray):
            raw_data = sp.csr_matrix(raw_data)

        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})

    elif isinstance(counts, pd.DataFrame):
        # DataFrame input: (cells x genes).
        raw_data = sp.csr_matrix(counts.values)
        all_gene_names = np.array(list(counts.columns.values))
        all_cell_names = np.array(list(counts.index.values))

        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})

    elif isinstance(counts, AnnData):
        all_cell_names=np.array(list(counts.obs_names))
        all_gene_names=np.array(list(counts.var_names))
        self.adata_raw = counts

    elif counts is not None:
        raise Exception(
            "\'counts\' must be either a tuple/list of "
            "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
            "cells x genes")

    if(annotations is not None):
        annotations = np.array(list(annotations))
        if counts is not None:
            self.adata_raw.obs['annotations'] = pd.Categorical(annotations)

    if(counts is not None):
        # Deduplicate IDs, then keep a working copy with the displayed
        # expression layer 'X_disp' used by downstream methods.
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()

        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                    include_genes=None, exclude_genes=None,
                    include_cells=None, exclude_cells=None,
                    norm='log', min_expression=1, thresh=0.01,
                    filter_genes=True):
    """Log-normalizes and filters the expression data.

    Parameters
    ----------
    div : float, optional, default 1
        The factor by which the gene expression will be divided prior to
        log normalization.
    downsample : float, optional, default 0
        The factor by which to randomly downsample the data. If 0, the
        data will not be downsampled.
    sum_norm : str or float, optional, default None
        If a float, the total number of transcripts in each cell will be
        normalized to this value prior to normalization and filtering.
        Otherwise, nothing happens. If 'cell_median', each cell is
        normalized to have the median total read count per cell. If
        'gene_median', each gene is normalized to have the median total
        read count per gene.
    norm : str, optional, default 'log'
        If 'log', log-normalizes the expression data. If 'ftt', applies the
        Freeman-Tukey variance-stabilization transformation. If
        'multinomial', applies the Pearson-residual transformation (this is
        experimental and should only be used for raw, un-normalized UMI
        datasets). If None, the data is not normalized.
    include_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to keep.
        All other genes will be filtered out. Gene names are case-
        sensitive.
    exclude_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to
        exclude. These genes will be filtered out. Gene names are case-
        sensitive.
    include_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to keep.
        All other cells will be filtered out. Cell names are
        case-sensitive.
    exclude_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to exclude.
        These cells will be filtered out. Cell names are
        case-sensitive.
    min_expression : float, optional, default 1
        The threshold above which a gene is considered
        expressed. Gene expression values less than 'min_expression' are
        set to zero.
    thresh : float, optional, default 0.01
        Keep genes expressed in greater than 'thresh'*100 % of cells and
        less than (1-'thresh')*100 % of cells, where a gene is considered
        expressed if its expression value exceeds 'min_expression'.
    filter_genes : bool, optional, default True
        Setting this to False turns off filtering operations aside from
        removing genes with zero expression across all cells. Genes passed
        in exclude_genes or not passed in include_genes will still be
        filtered.
    """
    # load data
    try:
        D = self.adata_raw.X
        self.adata = self.adata_raw.copy()
    except AttributeError:
        # Bug fix: previously execution fell through after this message
        # and crashed with a NameError on `D`; bail out instead.
        print('No data loaded')
        return

    # filter cells: intersect the include/exclude/downsample index sets
    cell_names = np.array(list(self.adata_raw.obs_names))
    idx_cells = np.arange(D.shape[0])
    if(include_cells is not None):
        include_cells = np.array(list(include_cells))
        idx2 = np.where(np.in1d(cell_names, include_cells))[0]
        idx_cells = np.array(list(set(idx2) & set(idx_cells)))
    if(exclude_cells is not None):
        exclude_cells = np.array(list(exclude_cells))
        idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                invert=True))[0]
        idx_cells = np.array(list(set(idx4) & set(idx_cells)))
    if downsample > 0:
        numcells = int(D.shape[0] / downsample)
        rand_ind = np.random.choice(np.arange(D.shape[0]),
                                    size=numcells, replace=False)
        idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
    else:
        numcells = D.shape[0]

    mask_cells = np.zeros(D.shape[0], dtype='bool')
    mask_cells[idx_cells] = True
    self.adata = self.adata_raw[mask_cells, :].copy()
    D = self.adata.X

    if isinstance(D, np.ndarray):
        D = sp.csr_matrix(D, dtype='float32')
    else:
        D = D.astype('float32')
    D.sort_indices()

    if(D.getformat() == 'csc'):
        D = D.tocsr()

    # sum-normalize (skipped for 'multinomial', which normalizes itself)
    if (sum_norm == 'cell_median' and norm != 'multinomial'):
        s = D.sum(1).A.flatten()
        sum_norm = np.median(s)
        D = D.multiply(1 / s[:, None] * sum_norm).tocsr()
    elif (sum_norm == 'gene_median' and norm != 'multinomial'):
        s = D.sum(0).A.flatten()
        sum_norm = np.median(s)
        s[s == 0] = 1  # avoid division by zero for all-zero genes
        D = D.multiply(1 / s[None, :] * sum_norm).tocsr()
    elif sum_norm is not None and norm != 'multinomial':
        D = D.multiply(1 / D.sum(1).A.flatten()[:,
                       None] * sum_norm).tocsr()

    # normalize (operating on D.data keeps the sparsity structure intact)
    self.adata.X = D
    if norm is None:
        D.data[:] = (D.data / div)
    elif(norm.lower() == 'log'):
        D.data[:] = np.log2(D.data / div + 1)
    elif(norm.lower() == 'ftt'):
        # Freeman-Tukey: sqrt(x) + sqrt(x+1)
        D.data[:] = np.sqrt(D.data / div) + np.sqrt(D.data / div + 1)
    elif norm.lower() == 'multinomial':
        # Pearson residuals under a multinomial null model
        ni = D.sum(1).A.flatten()  # total counts per cell
        pj = (D.sum(0) / D.sum()).A.flatten()  # relative abundance per gene
        col = D.indices
        row = []
        for i in range(D.shape[0]):
            row.append(i * np.ones(D.indptr[i + 1] - D.indptr[i]))
        row = np.concatenate(row).astype('int32')
        # expected counts mu_ij = n_i * p_j on the nonzero entries
        mu = sp.coo_matrix((ni[row] * pj[col], (row, col))).tocsr()
        mu2 = mu.copy()
        mu2.data[:] = mu2.data**2
        mu2 = mu2.multiply(1 / ni[:, None])
        mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
        self.adata.X = mu
        # D itself is kept log-normalized for display purposes
        if sum_norm is None:
            sum_norm = np.median(ni)
        D = D.multiply(1 / ni[:, None] * sum_norm).tocsr()
        D.data[:] = np.log2(D.data / div + 1)
    else:
        D.data[:] = (D.data / div)

    # zero-out low-expressed entries
    idx = np.where(D.data <= min_expression)[0]
    D.data[idx] = 0

    # filter genes: intersect include/exclude/expression-fraction sets
    gene_names = np.array(list(self.adata.var_names))
    idx_genes = np.arange(D.shape[1])
    if(include_genes is not None):
        include_genes = np.array(list(include_genes))
        idx = np.where(np.in1d(gene_names, include_genes))[0]
        idx_genes = np.array(list(set(idx) & set(idx_genes)))
    if(exclude_genes is not None):
        exclude_genes = np.array(list(exclude_genes))
        idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                invert=True))[0]
        idx_genes = np.array(list(set(idx3) & set(idx_genes)))
    if(filter_genes):
        # fraction of cells in which each gene is expressed
        a, ct = np.unique(D.indices, return_counts=True)
        c = np.zeros(D.shape[1])
        c[a] = ct
        keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                       c / D.shape[0] <= 1 - thresh))[0]
        idx_genes = np.array(list(set(keep) & set(idx_genes)))

    # apply the gene mask multiplicatively so matrix shape is preserved
    mask_genes = np.zeros(D.shape[1], dtype='bool')
    mask_genes[idx_genes] = True
    self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
    self.adata.X.eliminate_zeros()
    self.adata.var['mask_genes'] = mask_genes

    if norm == 'multinomial':
        self.adata.layers['X_disp'] = D.multiply(
            mask_genes[None, :]).tocsr()
        self.adata.layers['X_disp'].eliminate_zeros()
    else:
        self.adata.layers['X_disp'] = self.adata.X
def load_data(self, filename, transpose=True,
              save_sparse_file='h5ad', sep=',', **kwargs):
    """Loads the specified data file. The file can be a table of
    read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
    as columns by default. The file can also be a pickle file (output from
    'save_sparse_data') or an h5ad file (output from 'save_anndata').

    This function that loads the file specified by 'filename'.

    Parameters
    ----------
    filename - string
        The path to the tabular raw expression counts file.
    sep - string, optional, default ','
        The delimeter used to read the input data table. By default
        assumes the input table is delimited by commas.
    save_sparse_file - str, optional, default 'h5ad'
        If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
        (the native AnnData file format) to the same folder as the original
        data for faster loading in the future. If 'p', pickles the sparse
        data structure, cell names, and gene names in the same folder as
        the original data for faster loading in the future.
    transpose - bool, optional, default True
        By default, assumes file is (genes x cells). Set this to False if
        the file has dimensions (cells x genes).
    """
    if filename.split('.')[-1] == 'p':
        # Pickle cache format: (sparse genes x cells, cell IDs, gene IDs).
        # NOTE(review): the file handle passed to pickle.load is never
        # closed explicitly.
        raw_data, all_cell_names, all_gene_names = (
            pickle.load(open(filename, 'rb')))
        if(transpose):
            # stored as (genes x cells); flip to (cells x genes)
            raw_data = raw_data.T
            if raw_data.getformat() == 'csc':
                print("Converting sparse matrix to csr format...")
                raw_data = raw_data.tocsr()
        save_sparse_file = None
    elif filename.split('.')[-1] != 'h5ad':
        # plain-text table read with pandas
        df = pd.read_csv(filename, sep=sep, index_col=0)
        if(transpose):
            dataset = df.T
        else:
            dataset = df
        raw_data = sp.csr_matrix(dataset.values)
        all_cell_names = np.array(list(dataset.index.values))
        all_gene_names = np.array(list(dataset.columns.values))

    if filename.split('.')[-1] != 'h5ad':
        self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                 var={'var_names': all_gene_names})
        # de-duplicate IDs so AnnData indexing is unambiguous
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = raw_data
    else:
        # native AnnData file; no cache needs to be (re)written
        self.adata_raw = anndata.read_h5ad(filename, **kwargs)
        self.adata = self.adata_raw.copy()
        if 'X_disp' not in list(self.adata.layers.keys()):
            self.adata.layers['X_disp'] = self.adata.X
        save_sparse_file = None

    if(save_sparse_file == 'p'):
        # write a fast-loading pickle cache next to the original file
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_sparse_data(path + new_sparse_file + '_sparse.p')
    elif(save_sparse_file == 'h5ad'):
        # write a fast-loading h5ad cache next to the original file
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
    """Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
    Pickle file.

    Parameters
    ----------
    fname - string
        The filename of the output file.
    """
    # stored as (genes x cells); CSC gives fast per-cell column access
    data = self.adata_raw.X.T
    if data.getformat() == 'csr':
        data = data.tocsc()
    cell_names = np.array(list(self.adata_raw.obs_names))
    gene_names = np.array(list(self.adata_raw.var_names))
    # Bug fix: the file handle was previously opened inline and never
    # closed; use a context manager so it is always released.
    with open(fname, 'wb') as f:
        pickle.dump((data, cell_names, gene_names), f)
def save_anndata(self, fname, data='adata_raw', **kwargs):
    """Saves `adata_raw` to a .h5ad file (AnnData's native file format).

    Parameters
    ----------
    fname - string
        The filename of the output file.
    data - string, optional, default 'adata_raw'
        Name of the instance attribute holding the AnnData object to
        write (e.g. 'adata' or 'adata_raw').
    """
    # look up by instance dict so a missing attribute raises KeyError,
    # matching the original behavior
    target = self.__dict__[data]
    target.write_h5ad(fname, **kwargs)
def load_annotations(self, aname, sep=','):
    """Loads cell annotations.

    Loads the cell annotations specified by the 'aname' path.

    Parameters
    ----------
    aname - string
        The path to the annotations file. First column should be cell IDs
        and second column should be the desired annotations.
    sep - string, optional, default ','
        The delimiter used to read the annotations table.
    """
    ann = pd.read_csv(aname)

    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))

    # Heuristically re-read the file until its row count matches the
    # number of cells: try with/without an index column and with/without
    # a header row.
    if(ann.shape[1] > 1):
        ann = pd.read_csv(aname, index_col=0, sep=sep)
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
    else:
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, header=None, sep=sep)

    # normalize the index to fixed-width strings so name-based lookup of
    # cell IDs works regardless of the parsed dtype
    ann.index = np.array(list(ann.index.astype('<U100')))
    # ann1: annotations aligned to the filtered cells in `adata`;
    # ann2: annotations for all cells in `adata_raw`, in file order
    ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
    ann2 = np.array(list(ann.values.flatten()))
    self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
    self.adata.obs['annotations'] = pd.Categorical(ann1)
def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
    """Computes the spatial dispersion factors for each gene.

    Parameters
    ----------
    nnm - scipy.sparse, float
        Square cell-to-cell nearest-neighbor matrix.

    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This ensures
        that outlier genes do not significantly skew the weight
        distribution.

    Returns:
    -------
    weights - ndarray, float
        The vector of gene weights. (Sorted indices are not returned;
        use np.argsort(-weights) if needed.)
    """
    # smooth expression over each cell's k nearest neighbors
    self.knn_avg(nnm)
    D_avg = self.adata.layers['X_knn_avg']

    # per-gene mean and variance of the smoothed expression
    mu, var = sf.mean_variance_axis(D_avg, axis=0)

    # Fano-factor-style dispersion (variance / mean) where mean > 0
    dispersions = np.zeros(var.size)
    dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
    self.adata.var['spatial_dispersions'] = dispersions.copy()

    # clip at the mean of the top num_norm_avg dispersions so a few
    # outlier genes cannot dominate the normalized weights
    ma = np.sort(dispersions)[-num_norm_avg:].mean()
    dispersions[dispersions >= ma] = ma

    weights = ((dispersions / dispersions.max())**0.5).flatten()

    self.adata.var['weights'] = weights

    return weights
def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
    """Computes the contribution of the gene IDs in 'genes' to each
    principal component (PC) of the filtered expression data as the mean of
    the absolute value of the corresponding gene loadings. High values
    correspond to PCs that are highly correlated with the features in
    'genes'. These PCs can then be regressed out of the data using
    'regress_genes'.

    Parameters
    ----------
    genes - numpy.array or list
        Genes for which contribution to each PC will be calculated.

    npcs - int, optional, default None
        How many PCs to calculate when computing PCA of the filtered and
        log-transformed expression data. If None, calculate all PCs.

    plot - bool, optional, default False
        If True, plot the scores reflecting how correlated each PC is with
        genes of interest. Otherwise, do not plot anything.

    Returns:
    -------
    x - numpy.array
        Scores reflecting how correlated each PC is with the genes of
        interest (ordered by decreasing eigenvalues). Returns None when
        'genes' is None (only the PCA model/scores are stored).
    """
    from sklearn.decomposition import PCA
    if npcs is None:
        # "all" PCs: capped at the number of cells
        npcs = self.adata.X.shape[0]

    pca = PCA(n_components=npcs)
    pc = pca.fit_transform(self.adata.X.toarray())

    # cached for use by regress_genes
    self.regression_pca = pca
    self.regression_pcs = pc

    gene_names = np.array(list(self.adata.var_names))
    if(genes is not None):
        idx = np.where(np.in1d(gene_names, genes))[0]
        sx = pca.components_[:, idx]
        # mean absolute loading of the selected genes on each PC
        x = np.abs(sx).mean(1)

        if plot:
            plt.figure()
            plt.plot(x)

        return x
    else:
        return
def regress_genes(self, PCs):
    """Regress out the principal components in 'PCs' from the filtered
    expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
    been previously called.

    Parameters
    ----------
    PCs - int, numpy.array, list
        The principal components to regress out of the expression data.
    """
    # normalize scalar / list / array input to a flat index array
    ind = [PCs]
    ind = np.array(ind).flatten()
    try:
        # weight-aware regression using the SAM gene weights
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :] * self.adata.var[
                'weights'].values)
    except BaseException:
        # NOTE(review): BaseException is very broad; presumably only
        # meant to catch a missing 'weights' column — confirm before
        # narrowing. Falls back to unweighted regression.
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :])

    self.adata.X = sp.csr_matrix(y)
def run(self,
        max_iter=10,
        verbose=True,
        projection='umap',
        stopping_condition=5e-3,
        num_norm_avg=50,
        k=20,
        distance='correlation',
        preprocessing='Normalizer',
        proj_kwargs={}):
    """Runs the Self-Assembling Manifold algorithm.

    Parameters
    ----------
    k - int, optional, default 20
        The number of nearest neighbors to identify for each cell.

    distance : string, optional, default 'correlation'
        The distance metric to use when constructing cell distance
        matrices. Can be any of the distance metrics supported by
        sklearn's 'pdist'.

    max_iter - int, optional, default 10
        The maximum number of iterations SAM will run.

    stopping_condition - float, optional, default 5e-3
        The stopping condition threshold for the RMSE between gene weights
        in adjacent iterations.

    verbose - bool, optional, default True
        If True, the iteration number and error between gene weights in
        adjacent iterations will be displayed.

    projection - str, optional, default 'umap'
        If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
        embedding. Otherwise, no embedding will be generated.

    preprocessing - str, optional, default 'Normalizer'
        If 'Normalizer', use sklearn.preprocessing.Normalizer, which
        normalizes expression data prior to PCA such that each cell has
        unit L2 norm. If 'StandardScaler', use
        sklearn.preprocessing.StandardScaler, which normalizes expression
        data prior to PCA such that each gene has zero mean and unit
        variance. Otherwise, do not normalize the expression data. We
        recommend using 'StandardScaler' for large datasets and
        'Normalizer' otherwise.

    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This prevents
        genes with large spatial dispersions from skewing the distribution
        of weights.

    proj_kwargs - dict, optional, default {}
        A dictionary of keyword arguments to pass to the projection
        functions.
        NOTE(review): mutable default argument; safe only because it is
        never mutated here.
    """
    self.distance = distance
    D = self.adata.X
    # clamp k to [5, 100] and to the dataset size
    self.k = k
    if(self.k < 5):
        self.k = 5
    elif(self.k > 100):
        self.k = 100
    if(self.k > D.shape[0] - 1):
        self.k = D.shape[0] - 2

    numcells = D.shape[0]

    # heuristic: use fewer genes for larger datasets
    n_genes = 8000
    if numcells > 3000 and n_genes > 3000:
        n_genes = 3000
    elif numcells > 2000 and n_genes > 4500:
        n_genes = 4500
    elif numcells > 1000 and n_genes > 6000:
        n_genes = 6000
    elif n_genes > 8000:
        n_genes = 8000

    # heuristic: use fewer PCs for larger datasets
    npcs = None
    if npcs is None and numcells > 3000:
        npcs = 150
    elif npcs is None and numcells > 2000:
        npcs = 250
    elif npcs is None and numcells > 1000:
        npcs = 350
    elif npcs is None:
        npcs = 500

    tinit = time.time()

    # initialize with a random kNN graph: each cell gets itself plus
    # k-1 randomly chosen neighbors
    edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
    nums = np.arange(edm.shape[1])
    RINDS = np.random.randint(
        0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                       (self.k - 1)))
    RINDS = np.hstack((nums[:, None], RINDS))

    edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
    edm = edm.tocsr()

    print('RUNNING SAM')

    # initial weights from the random graph (no outlier averaging yet)
    W = self.dispersion_ranking_NN(
        edm, num_norm_avg=1)

    old = np.zeros(W.size)
    new = W

    i = 0
    err = ((new - old)**2).mean()**0.5

    if max_iter < 5:
        max_iter = 5

    nnas = num_norm_avg
    self.Ns = [edm]
    self.Ws = [W]
    # iterate (re-weight genes -> PCA -> kNN graph) until the RMSE
    # between consecutive weight vectors falls below the threshold
    while (i < max_iter and err > stopping_condition):
        conv = err
        if(verbose):
            print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))

        i += 1
        old = new

        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        new = W
        err = ((new - old)**2).mean()**0.5

        self.Ns.append(EDM)
        self.Ws.append(W)

    # one final pass with the converged weights
    W, wPCA_data, EDM, = self.calculate_nnm(
        D, W, n_genes, preprocessing, npcs, numcells, nnas)
    self.Ns.append(EDM)

    all_gene_names = np.array(list(self.adata.var_names))
    indices = np.argsort(-W)
    ranked_genes = all_gene_names[indices]

    # populate adata.uns['gene_groups'] as a side effect
    self.corr_bin_genes(number_of_features=1000)

    self.adata.uns['ranked_genes'] = ranked_genes

    self.adata.obsm['X_pca'] = wPCA_data

    self.adata.uns['neighbors'] = {}
    self.adata.uns['neighbors']['connectivities'] = EDM

    if(projection == 'tsne'):
        print('Computing the t-SNE embedding...')
        self.run_tsne(**proj_kwargs)
    elif(projection == 'umap'):
        print('Computing the UMAP embedding...')
        self.run_umap(**proj_kwargs)
    elif(projection == 'diff_umap'):
        print('Computing the diffusion UMAP embedding...')
        self.run_diff_umap(**proj_kwargs)

    elapsed = time.time() - tinit

    if verbose:
        print('Elapsed time: ' + str(elapsed) + ' seconds')
def calculate_nnm(
        self,
        D,
        W,
        n_genes,
        preprocessing,
        npcs,
        numcells,
        num_norm_avg):
    """One SAM iteration: select/weight genes, run weighted PCA, rebuild
    the kNN graph, and recompute the gene weights from that graph.

    Returns (weights, weighted-PCA embedding, sparse kNN adjacency).
    """
    if(n_genes is None):
        gkeep = np.arange(W.size)
    else:
        # keep only the top-weighted n_genes (sorted for stable slicing)
        gkeep = np.sort(np.argsort(-W)[:n_genes])

    if preprocessing == 'Normalizer':
        Ds = D[:, gkeep].toarray()
        Ds = Normalizer().fit_transform(Ds)

    elif preprocessing == 'StandardScaler':
        Ds = D[:, gkeep].toarray()
        Ds = StandardScaler(with_mean=True).fit_transform(Ds)
        # clamp extreme z-scores
        Ds[Ds > 5] = 5
        Ds[Ds < -5] = -5

    else:
        Ds = D[:, gkeep].toarray()

    # scale each kept gene by its current SAM weight
    D_sub = Ds * (W[gkeep])

    if numcells > 500:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='auto')
    else:
        # exact SVD for small datasets
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='full')

    if self.distance == 'euclidean':
        # unit-normalize rows so euclidean distance behaves like cosine
        g_weighted = Normalizer().fit_transform(g_weighted)

    self.adata.uns['pca_obj'] = pca
    EDM = self.calc_nnm(g_weighted)

    W = self.dispersion_ranking_NN(
        EDM, num_norm_avg=num_norm_avg)

    self.adata.uns['X_processed'] = D_sub

    return W, g_weighted, EDM
def calc_nnm(self, g_weighted):
    """Build a sparse binary kNN adjacency matrix from the PCA embedding."""
    numcells = g_weighted.shape[0]
    if g_weighted.shape[0] > 8000:
        # approximate nearest neighbors for large datasets
        nnm, dists = ut.nearest_neighbors(
            g_weighted, n_neighbors=self.k, metric=self.distance)
        EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        # scatter 1s at (cell, neighbor) positions
        EDM[np.tile(np.arange(nnm.shape[0])[:, None],
                    (1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
        EDM = EDM.tocsr()
    else:
        # exact pairwise distances for small datasets
        dist = ut.compute_distances(g_weighted, self.distance)
        nnm = ut.dist_to_nn(dist, self.k)
        EDM = sp.csr_matrix(nnm)
    return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
    """Plots orthogonal expression patterns.

    In the default mode, plots orthogonal gene expression patterns. A
    specific correlated group of genes can be specified to plot gene
    expression patterns within that group.

    Parameters
    ----------
    group - int, optional, default None
        If specified, display the genes within the desired correlated
        group. Otherwise, display the top ranked gene within each distinct
        correlated group.
    n_genes - int, optional, default 5
        The number of top ranked genes to display within a correlated
        group if 'group' is specified.
    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    geneID_groups = self.adata.uns['gene_groups']
    if group is None:
        # one representative (top-ranked) gene per correlated group
        for genes in geneID_groups:
            self.show_gene_expression(genes[0], **kwargs)
    else:
        # top n_genes of the requested group, in rank order
        for i in range(n_genes):
            self.show_gene_expression(geneID_groups[group][i], **kwargs)
def plot_correlated_genes(
        self,
        name,
        n_genes=5,
        number_of_features=1000,
        **kwargs):
    """Plots gene expression patterns correlated with the input gene.

    Parameters
    ----------
    name - string
        The name of the gene with respect to which correlated gene
        expression patterns will be displayed.
    n_genes - int, optional, default 5
        The number of top ranked correlated genes to display.
    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    all_gene_names = np.array(list(self.adata.var_names))
    if (all_gene_names == name).sum() == 0:
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return
    sds = self.corr_bin_genes(
        input_gene=name,
        number_of_features=number_of_features)
    # index 0 of sds is the query gene itself; show at most n_genes others
    stop = min(n_genes + 1, sds.size)
    for i in range(1, stop):
        self.show_gene_expression(sds[i], **kwargs)
    return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
    """A (hacky) method for binning groups of genes correlated along the
    SAM manifold.

    Parameters
    ----------
    number_of_features - int, optional, default None
        The number of genes to bin. Capped at 1000 due to memory
        considerations.
    input_gene - str, optional, default None
        If not None, use this gene as the first seed when growing the
        correlation bins; only the group containing it is returned.

    NOTE(review): the two branches below are near-duplicates and differ
    only in the initial seed and the return value.
    """
    weights = self.adata.var['spatial_dispersions'].values
    all_gene_names = np.array(list(self.adata.var_names))
    D_avg = self.adata.layers['X_knn_avg']

    # genes ordered by decreasing dispersion; zero-dispersion genes dropped
    idx2 = np.argsort(-weights)[:weights[weights > 0].size]

    if(number_of_features is None or number_of_features > idx2.size):
        number_of_features = idx2.size

    if number_of_features > 1000:
        number_of_features = 1000

    if(input_gene is not None):
        input_gene = np.where(all_gene_names == input_gene)[0]
        if(input_gene.size == 0):
            print(
                "Gene note found in the filtered dataset. Note "
                "that genes are case sensitive.")
            return
        seeds = [np.array([input_gene])]
        # pairwise correlation of the kNN-smoothed expression profiles
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            # baseline: mean of the positive correlations for gene i
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            # attach gene i to the seed it correlates with most strongly
            # (if above baseline); otherwise start a new seed
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                        > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])

        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(all_gene_names[seeds[i]])

        # only the group grown from input_gene is of interest here
        return geneID_groups[0]
    else:
        seeds = [np.array([idx2[0]])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                        > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])

        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(
                all_gene_names[seeds[i]])

        self.adata.uns['gene_groups'] = geneID_groups
        return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
    """Wrapper for sklearn's t-SNE implementation.

    See sklearn for the t-SNE documentation. When X is given, embeds X
    with the supplied metric and returns the result; otherwise embeds
    the SAM PCA coordinates using self.distance and stores the result
    in adata.obsm['X_tsne'].
    """
    if X is not None:
        return man.TSNE(metric=metric, **kwargs).fit_transform(X)
    embedding = man.TSNE(metric=self.distance,
                         **kwargs).fit_transform(self.adata.obsm['X_pca'])
    self.adata.obsm['X_tsne'] = embedding
def run_umap(self, X=None, metric=None, **kwargs):
    """Wrapper for umap-learn.

    See https://github.com/lmcinnes/umap sklearn for the documentation
    and source code. When X is given, embeds X and returns the result;
    otherwise embeds the SAM PCA coordinates and stores the result in
    adata.obsm['X_umap'].
    """
    import umap as umap
    if metric is None:
        # default to the distance metric SAM was run with
        metric = self.distance
    reducer = umap.UMAP(metric=metric, **kwargs)
    if X is not None:
        return reducer.fit_transform(X)
    self.adata.obsm['X_umap'] = reducer.fit_transform(
        self.adata.obsm['X_pca'])
def run_diff_umap(self, use_rep='X_pca', metric='euclidean', n_comps=15,
                  method='gauss', **kwargs):
    """
    Experimental -- running UMAP on the diffusion components.

    NOTE(review): the `metric` parameter is currently unused; the first
    neighbor graph uses self.distance and the second uses 'euclidean'.
    """
    import scanpy.api as sc
    # neighbors on the SAM representation, then diffusion maps
    sc.pp.neighbors(self.adata, use_rep=use_rep, n_neighbors=self.k,
                    metric=self.distance, method=method)
    sc.tl.diffmap(self.adata, n_comps=n_comps)
    # recompute neighbors in diffusion space for the UMAP
    sc.pp.neighbors(self.adata, use_rep='X_diffmap', n_neighbors=self.k,
                    metric='euclidean', method=method)
    if 'X_umap' in self.adata.obsm.keys():
        # preserve the original SAM UMAP before scanpy overwrites it
        self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']

    sc.tl.umap(self.adata, min_dist=0.1, copy=False)
def knn_avg(self, nnm=None):
    """Average each cell's display expression over its k nearest
    neighbors and store the result in adata.layers['X_knn_avg']."""
    if nnm is None:
        nnm = self.adata.uns['neighbors']['connectivities']
    # row-normalize the adjacency by k, then propagate expression
    # along the kNN graph
    self.adata.layers['X_knn_avg'] = (nnm / self.k).dot(
        self.adata.layers['X_disp'])
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
            edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
    """Display a scatter plot.

    Displays a scatter plot using the SAM projection or another input
    projection with or without annotations.

    Parameters
    ----------
    projection - ndarray of floats, optional, default None
        An N x 2 matrix, where N is the number of data points. If None,
        use an existing SAM projection (default t-SNE). Can take on values
        'umap' or 'tsne' to specify either the SAM UMAP embedding or
        SAM t-SNE embedding.
    c - ndarray or str, optional, default None
        Colors for each cell in the scatter plot. Can be a vector of
        floats or strings for cell annotations. Can also be a key
        for sam.adata.obs (i.e. 'louvain_clusters').
    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.
    cmap - string, optional, default 'rainbow'
        The colormap to use for the input color values.
    colorbar - bool, optional default True
        If True, display a colorbar indicating which values / annotations
        correspond to which color in the scatter plot.

    Keyword arguments -
        All other keyword arguments that can be passed into
        matplotlib.pyplot.scatter can be used.
    """
    if (not PLOTTING):
        print("matplotlib not installed!")
    else:
        # resolve the 2D coordinates to plot
        if(isinstance(projection, str)):
            try:
                dt = self.adata.obsm[projection]
            except KeyError:
                print('Please create a projection first using run_umap or'
                      'run_tsne')

        elif(projection is None):
            # prefer UMAP, fall back to t-SNE
            try:
                dt = self.adata.obsm['X_umap']
            except KeyError:
                try:
                    dt = self.adata.obsm['X_tsne']
                except KeyError:
                    print("Please create either a t-SNE or UMAP projection"
                          "first.")
                    return
        else:
            dt = projection

        if(axes is None):
            plt.figure()
            axes = plt.gca()

        if(c is None):
            # no coloring requested
            plt.scatter(dt[:, 0], dt[:, 1], s=s,
                        linewidth=linewidth, edgecolor=edgecolor, **kwargs)
        else:
            # a string c may name a column of adata.obs
            if isinstance(c, str):
                try:
                    c = self.adata.obs[c].get_values()
                except KeyError:
                    0  # do nothing; c is used as-is below

            if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
                    (isinstance(c, np.ndarray) or isinstance(c, list))):
                # categorical string labels: map to integer codes for the
                # colormap, then label colorbar ticks with the originals
                i = ut.convert_annotations(c)
                ui, ai = np.unique(i, return_index=True)
                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)

                if(colorbar):
                    cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                    cbar.ax.set_yticklabels(c[ai])
            else:
                # numeric color values; scalar c disables the colorbar
                if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                    colorbar = False
                i = c

                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)

                if(colorbar):
                    plt.colorbar(cax, ax=axes)
def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
    """DBSCAN clustering on a 2D embedding (the SAM UMAP by default).

    Noise points (DBSCAN label -1) are reassigned to the cluster that
    wins a majority vote among their k nearest clustered neighbors.
    Results are stored in adata.obs['density_clusters'] when X is None;
    otherwise the label vector is returned.
    """
    from sklearn.cluster import DBSCAN
    if X is None:
        X = self.adata.obsm['X_umap']
        save = True
    else:
        save = False

    cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)

    idx0 = np.where(cl != -1)[0]  # clustered cells
    idx1 = np.where(cl == -1)[0]  # noise cells
    if idx1.size > 0 and idx0.size > 0:
        # distances from clustered cells to noise cells
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        # k nearest clustered cells for each noise cell
        knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # per-cluster vote counts for each noise cell
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['density_clusters'] = pd.Categorical(cl)
    else:
        return cl
def louvain_clustering(self, X=None, res=1, method='modularity'):
    """Runs Louvain clustering using the vtraag implementation. Assumes
    that 'louvain' optional dependency is installed.

    Parameters
    ----------
    res - float, optional, default 1
        The resolution parameter which tunes the number of clusters Louvain
        finds.

    method - str, optional, default 'modularity'
        Can be 'modularity' or 'significance', which are two different
        optimizing funcitons in the Louvain algorithm.
    """
    if X is None:
        X = self.adata.uns['neighbors']['connectivities']
        save = True
    else:
        if not sp.isspmatrix_csr(X):
            X = sp.csr_matrix(X)
        save = False

    import igraph as ig
    import louvain

    # X.dot(X.T)/k counts shared neighbors; sparse_knn keeps the top k
    # entries per row to build the weighted adjacency
    adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
    sources, targets = adjacency.nonzero()
    weights = adjacency[sources, targets]
    if isinstance(weights, np.matrix):
        # flatten the 1 x nnz matrix returned by sparse fancy indexing
        weights = weights.A1
    g = ig.Graph(directed=True)
    g.add_vertices(adjacency.shape[0])
    g.add_edges(list(zip(sources, targets)))
    try:
        g.es['weight'] = weights
    except BaseException:
        # fall back to an unweighted graph if edge weights can't be set
        pass

    if method == 'significance':
        cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
    else:
        cl = louvain.find_partition(
            g,
            louvain.RBConfigurationVertexPartition,
            resolution_parameter=res)

    if save:
        self.adata.obs['louvain_clusters'] = pd.Categorical(
            np.array(cl.membership))
    else:
        return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
    """Performs k-means clustering.

    Parameters
    ----------
    numc - int
        Number of clusters
    npcs - int, optional, default 15
        Number of principal components to use as inpute for k-means
        clustering.
    """
    from sklearn.cluster import KMeans
    save = X is None
    if save:
        # project the processed data onto the first npcs SAM PCs
        D_sub = self.adata.uns['X_processed']
        X = (D_sub - D_sub.mean(0)).dot(
            self.adata.uns['pca_obj'].components_[:npcs, :].T)

    labels = KMeans(n_clusters=numc).fit_predict(
        Normalizer().fit_transform(X))

    if save:
        self.adata.obs['kmeans_clusters'] = pd.Categorical(labels)
    else:
        return labels
def leiden_clustering(self, X=None, res=1):
    """Runs Leiden clustering via scanpy; stores integer-categorical
    labels in adata.obs under 'leiden_clusters' (or 'leiden_clusters_X'
    when an explicit adjacency X is supplied)."""
    import scanpy.api as sc
    if X is None:
        key = 'leiden_clusters'
        sc.tl.leiden(self.adata, resolution=res, key_added=key)
    else:
        key = 'leiden_clusters_X'
        sc.tl.leiden(self.adata, resolution=res, adjacency=X,
                     key_added=key)
    # scanpy stores string categories; convert to integer categoricals
    self.adata.obs[key] = pd.Categorical(
        self.adata.obs[key].get_values().astype('int'))
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    """HDBSCAN clustering; noise points are reassigned to the nearest
    cluster by majority vote among their k nearest clustered neighbors.

    Results are stored in adata.obs['hdbknn_clusters'] when X is None;
    otherwise the label vector is returned.
    """
    import hdbscan
    if X is None:
        # X = self.adata.obsm['X_pca']
        # project processed data onto the first 15 SAM PCs, unit-normalize
        D = self.adata.uns['X_processed']
        X = (D - D.mean(0)).dot(
            self.adata.uns['pca_obj'].components_.T)[:, :15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False

    if k is None:
        k = 20  # self.k

    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)

    cl = hdb.fit_predict(X)

    idx0 = np.where(cl != -1)[0]  # clustered cells
    idx1 = np.where(cl == -1)[0]  # noise cells
    if idx1.size > 0 and idx0.size > 0:
        # for each noise cell, vote among its k nearest clustered cells
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
                             n_genes=4000):
    """
    Ranks marker genes for each cluster using a random forest
    classification approach.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    clusters - int or array-like, default None
        A number or vector corresponding to the specific cluster ID(s)
        for which marker genes will be calculated. If None, marker genes
        will be computed for all clusters.

    n_genes - int, optional, default 4000
        By default, trains the classifier on the top 4000 SAM-weighted
        genes.

    Returns
    -------
    markers - dict
        Maps each cluster ID to its genes ranked by random-forest
        feature importance (descending).
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    from sklearn.ensemble import RandomForestClassifier

    markers = {}
    # FIX: 'clusters == None' compares elementwise when 'clusters' is an
    # array (raising ValueError on truth-testing); use identity check.
    if clusters is None:
        lblsu = np.unique(lbls)
    else:
        lblsu = np.unique(clusters)

    # Restrict training data to the top 'n_genes' SAM-weighted genes.
    indices = np.argsort(-self.adata.var['weights'].values)
    X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
    for K in range(lblsu.size):
        print(K)
        # One-vs-rest target vector for the current cluster.
        y = np.zeros(lbls.size)
        y[lbls == lblsu[K]] = 1
        clf = RandomForestClassifier(n_estimators=100, max_depth=None,
                                     random_state=0)
        clf.fit(X, y)
        idx = np.argsort(-clf.feature_importances_)
        markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]

    # Only cache in adata when markers were computed for ALL clusters.
    if clusters is None:
        self.adata.uns['marker_genes_rf'] = markers

    return markers
def identify_marker_genes_ratio(self, labels=None):
    """
    Ranks marker genes for each cluster using a SAM-weighted
    expression-ratio approach (works quite well).

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    Returns
    -------
    markers - dict
        Maps each cluster label to all gene names sorted by descending
        marker score; also stored in ``adata.uns['marker_genes_ratio']``.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    gene_names = np.array(list(self.adata.var_names))
    weights = self.adata.var['weights'].values
    expr = self.adata.layers['X_disp']

    # Per-gene total expression over all cells.
    totals = np.array(expr.sum(0)).flatten()
    nonzero = totals > 0

    markers = {}
    for label in np.unique(lbls):
        # Per-gene expression summed within the current cluster.
        cluster_sum = np.array(expr[lbls == label, :].sum(0)).flatten()
        scores = np.zeros(cluster_sum.size)
        # Score = (in-cluster sum)^2 / total sum, weighted by SAM weights.
        scores[nonzero] = (cluster_sum[nonzero] ** 2 / totals[nonzero]
                           * weights[nonzero])
        markers[label] = gene_names[np.argsort(-scores)]

    self.adata.uns['marker_genes_ratio'] = markers
    return markers
def identify_marker_genes_corr(self, labels=None, n_genes=4000):
    """
    Ranking marker genes based on their respective magnitudes in the
    correlation dot products with cluster-specific reference expression
    profiles.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    n_genes - int, optional, default 4000
        By default, computes correlations on the top 4000 SAM-weighted genes.

    Returns
    -------
    markers - dict
        Maps each cluster label to the top-weighted gene names sorted by
        descending correlation with the cluster's mean profile; also
        stored in ``adata.uns['marker_genes_corr']``.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    # Standardize each of the top 'n_genes' weighted genes across cells,
    # then rescale by the SAM weights.
    w=self.adata.var['weights'].values
    s = StandardScaler()
    idxg = np.argsort(-w)[:n_genes]
    y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]

    all_gene_names = np.array(list(self.adata.var_names))[idxg]
    markers = {}
    lblsu=np.unique(lbls)
    for i in lblsu:
        Gcells = np.array(list(self.adata.obs_names[lbls==i]))
        # Rows of the standardized matrix belonging to this cluster.
        z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
        # z-score each cell's profile across genes.
        m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]

        # Standardized cluster-mean reference profile.
        ref = z1.mean(0)
        ref = (ref-ref.mean())/ref.std()
        # Mean per-gene correlation-like score against the reference.
        g2 = (m1*ref).mean(0)
        markers[i] = all_gene_names[np.argsort(-g2)]

    self.adata.uns['marker_genes_corr'] = markers
    return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.louvain_clustering | python | def louvain_clustering(self, X=None, res=1, method='modularity'):
if X is None:
X = self.adata.uns['neighbors']['connectivities']
save = True
else:
if not sp.isspmatrix_csr(X):
X = sp.csr_matrix(X)
save = False
import igraph as ig
import louvain
adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=True)
g.add_vertices(adjacency.shape[0])
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except BaseException:
pass
if method == 'significance':
cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
else:
cl = louvain.find_partition(
g,
louvain.RBConfigurationVertexPartition,
resolution_parameter=res)
if save:
self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
else:
return np.array(cl.membership) | Runs Louvain clustering using the vtraag implementation. Assumes
that 'louvain' optional dependency is installed.
Parameters
----------
res - float, optional, default 1
The resolution parameter which tunes the number of clusters Louvain
finds.
method - str, optional, default 'modularity'
Can be 'modularity' or 'significance', which are two different
optimizing funcitons in the Louvain algorithm. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1265-L1316 | [
"def sparse_knn(D, k):\n D1 = D.tocoo()\n idr = np.argsort(D1.row)\n D1.row[:] = D1.row[idr]\n D1.col[:] = D1.col[idr]\n D1.data[:] = D1.data[idr]\n\n _, ind = np.unique(D1.row, return_index=True)\n ind = np.append(ind, D1.data.size)\n for i in range(ind.size - 1):\n idx = np.argsort(... | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
    """Initializes SAM from preloaded data (see the class docstring for
    the accepted 'counts' formats). With counts=None, data must be loaded
    later via one of the load functions."""
    if isinstance(counts, tuple) or isinstance(counts, list):
        # (matrix, gene IDs, cell IDs) triple.
        raw_data, all_gene_names, all_cell_names = counts
        if isinstance(raw_data, np.ndarray):
            raw_data = sp.csr_matrix(raw_data)
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, pd.DataFrame):
        # DataFrame is assumed (cells x genes).
        raw_data = sp.csr_matrix(counts.values)
        all_gene_names = np.array(list(counts.columns.values))
        all_cell_names = np.array(list(counts.index.values))
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, AnnData):
        all_cell_names=np.array(list(counts.obs_names))
        all_gene_names=np.array(list(counts.var_names))
        self.adata_raw = counts
    elif counts is not None:
        raise Exception(
            "\'counts\' must be either a tuple/list of "
            "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
            "cells x genes")

    if(annotations is not None):
        annotations = np.array(list(annotations))
        if counts is not None:
            self.adata_raw.obs['annotations'] = pd.Categorical(annotations)

    if(counts is not None):
        # Deduplicate IDs only when duplicates actually exist.
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        # 'X_disp' is the layer used for display/marker computations.
        self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                    include_genes=None, exclude_genes=None,
                    include_cells=None, exclude_cells=None,
                    norm='log', min_expression=1, thresh=0.01,
                    filter_genes=True):
    """Log-normalizes and filters the expression data.

    Parameters
    ----------

    div : float, optional, default 1
        The factor by which the gene expression will be divided prior to
        log normalization.

    downsample : float, optional, default 0
        The factor by which to randomly downsample the data. If 0, the
        data will not be downsampled.

    sum_norm : str or float, optional, default None
        If a float, the total number of transcripts in each cell will be
        normalized to this value prior to normalization and filtering.
        Otherwise, nothing happens. If 'cell_median', each cell is
        normalized to have the median total read count per cell. If
        'gene_median', each gene is normalized to have the median total
        read count per gene.

    norm : str, optional, default 'log'
        If 'log', log-normalizes the expression data. If 'ftt', applies the
        Freeman-Tukey variance-stabilization transformation. If
        'multinomial', applies the Pearson-residual transformation (this is
        experimental and should only be used for raw, un-normalized UMI
        datasets). If None, the data is not normalized.

    include_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to keep.
        All other genes will be filtered out. Gene names are case-
        sensitive.

    exclude_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to
        exclude. These genes will be filtered out. Gene names are case-
        sensitive.

    include_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to keep.
        All other cells will be filtered out. Cell names are
        case-sensitive.

    exclude_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to exclude.
        Thses cells will be filtered out. Cell names are
        case-sensitive.

    min_expression : float, optional, default 1
        The threshold above which a gene is considered
        expressed. Gene expression values less than 'min_expression' are
        set to zero.

    thresh : float, optional, default 0.2
        Keep genes expressed in greater than 'thresh'*100 % of cells and
        less than (1-'thresh')*100 % of cells, where a gene is considered
        expressed if its expression value exceeds 'min_expression'.

    filter_genes : bool, optional, default True
        Setting this to False turns off filtering operations aside from
        removing genes with zero expression across all cells. Genes passed
        in exclude_genes or not passed in include_genes will still be
        filtered.

    """
    # load data
    try:
        D= self.adata_raw.X
        self.adata = self.adata_raw.copy()
    except AttributeError:
        print('No data loaded')
    # filter cells
    # Cell selection is accumulated as an index set ('idx_cells');
    # include/exclude/downsample constraints are intersected in turn.
    cell_names = np.array(list(self.adata_raw.obs_names))
    idx_cells = np.arange(D.shape[0])
    if(include_cells is not None):
        include_cells = np.array(list(include_cells))
        idx2 = np.where(np.in1d(cell_names, include_cells))[0]
        idx_cells = np.array(list(set(idx2) & set(idx_cells)))
    if(exclude_cells is not None):
        exclude_cells = np.array(list(exclude_cells))
        idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                invert=True))[0]
        idx_cells = np.array(list(set(idx4) & set(idx_cells)))

    if downsample > 0:
        numcells = int(D.shape[0] / downsample)
        rand_ind = np.random.choice(np.arange(D.shape[0]),
                                    size=numcells, replace=False)
        idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
    else:
        numcells = D.shape[0]

    mask_cells = np.zeros(D.shape[0], dtype='bool')
    mask_cells[idx_cells] = True

    self.adata = self.adata_raw[mask_cells,:].copy()
    D = self.adata.X
    # Ensure a sorted-index float32 CSR matrix before normalization.
    if isinstance(D,np.ndarray):
        D=sp.csr_matrix(D,dtype='float32')
    else:
        D=D.astype('float32')
        D.sort_indices()

    if(D.getformat() == 'csc'):
        D=D.tocsr();

    # sum-normalize
    # (skipped for 'multinomial', which does its own scaling below)
    if (sum_norm == 'cell_median' and norm != 'multinomial'):
        s = D.sum(1).A.flatten()
        sum_norm = np.median(s)
        D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
    elif (sum_norm == 'gene_median' and norm != 'multinomial'):
        s = D.sum(0).A.flatten()
        sum_norm = np.median(s)
        s[s==0]=1
        D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
    elif sum_norm is not None and norm != 'multinomial':
        D = D.multiply(1 / D.sum(1).A.flatten()[:,
                       None] * sum_norm).tocsr()

    # normalize
    self.adata.X = D
    if norm is None:
        D.data[:] = (D.data / div)
    elif(norm.lower() == 'log'):
        D.data[:] = np.log2(D.data / div + 1)
    elif(norm.lower() == 'ftt'):
        D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
    elif norm.lower() == 'multinomial':
        # Pearson residuals: (observed - expected) / sqrt(variance)
        # under a multinomial model with cell totals ni and gene
        # frequencies pj.
        ni = D.sum(1).A.flatten() #cells
        pj = (D.sum(0) / D.sum()).A.flatten() #genes
        col = D.indices
        row=[]
        for i in range(D.shape[0]):
            row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
        row = np.concatenate(row).astype('int32')
        mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
        mu2 = mu.copy()
        mu2.data[:]=mu2.data**2
        mu2 = mu2.multiply(1/ni[:,None])
        mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)

        self.adata.X = mu
        if sum_norm is None:
            sum_norm = np.median(ni)
        # D is kept as a log-normalized copy for the display layer.
        D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
        D.data[:] = np.log2(D.data / div + 1)

    else:
        D.data[:] = (D.data / div)

    # zero-out low-expressed genes
    idx = np.where(D.data <= min_expression)[0]
    D.data[idx] = 0

    # filter genes
    gene_names = np.array(list(self.adata.var_names))
    idx_genes = np.arange(D.shape[1])
    if(include_genes is not None):
        include_genes = np.array(list(include_genes))
        idx = np.where(np.in1d(gene_names, include_genes))[0]
        idx_genes = np.array(list(set(idx) & set(idx_genes)))
    if(exclude_genes is not None):
        exclude_genes = np.array(list(exclude_genes))
        idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                invert=True))[0]
        idx_genes = np.array(list(set(idx3) & set(idx_genes)))
    if(filter_genes):
        # Count, per gene, the number of cells with nonzero expression.
        a, ct = np.unique(D.indices, return_counts=True)
        c = np.zeros(D.shape[1])
        c[a] = ct
        keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                       c / D.shape[0] <= 1 - thresh))[0]
        idx_genes = np.array(list(set(keep) & set(idx_genes)))

    mask_genes = np.zeros(D.shape[1], dtype='bool')
    mask_genes[idx_genes] = True

    # Filtered genes are zeroed out (columns kept, values removed).
    self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
    self.adata.X.eliminate_zeros()
    self.adata.var['mask_genes']=mask_genes

    if norm == 'multinomial':
        self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
        self.adata.layers['X_disp'].eliminate_zeros()
    else:
        self.adata.layers['X_disp'] = self.adata.X
def load_data(self, filename, transpose=True,
              save_sparse_file='h5ad', sep=',', **kwargs):
    """Loads the specified data file. The file can be a table of
    read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
    as columns by default. The file can also be a pickle file (output from
    'save_sparse_data') or an h5ad file (output from 'save_anndata').

    This function that loads the file specified by 'filename'.

    Parameters
    ----------
    filename - string
        The path to the tabular raw expression counts file.

    sep - string, optional, default ','
        The delimeter used to read the input data table. By default
        assumes the input table is delimited by commas.

    save_sparse_file - str, optional, default 'h5ad'
        If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
        (the native AnnData file format) to the same folder as the original
        data for faster loading in the future. If 'p', pickles the sparse
        data structure, cell names, and gene names in the same folder as
        the original data for faster loading in the future.

    transpose - bool, optional, default True
        By default, assumes file is (genes x cells). Set this to False if
        the file has dimensions (cells x genes).

    """
    # Dispatch on file extension: '.p' pickle, '.h5ad' AnnData,
    # anything else is treated as a delimited text table.
    if filename.split('.')[-1] == 'p':
        raw_data, all_cell_names, all_gene_names = (
            pickle.load(open(filename, 'rb')))

        if(transpose):
            raw_data = raw_data.T
            if raw_data.getformat()=='csc':
                print("Converting sparse matrix to csr format...")
                raw_data=raw_data.tocsr()

        # Already a fast-loading format; no need to re-save.
        save_sparse_file = None
    elif filename.split('.')[-1] != 'h5ad':
        df = pd.read_csv(filename, sep=sep, index_col=0)
        if(transpose):
            dataset = df.T
        else:
            dataset = df
        raw_data = sp.csr_matrix(dataset.values)
        all_cell_names = np.array(list(dataset.index.values))
        all_gene_names = np.array(list(dataset.columns.values))

    if filename.split('.')[-1] != 'h5ad':
        self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                 var={'var_names': all_gene_names})

        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()

        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = raw_data

    else:
        self.adata_raw = anndata.read_h5ad(filename, **kwargs)
        self.adata = self.adata_raw.copy()
        if 'X_disp' not in list(self.adata.layers.keys()):
            self.adata.layers['X_disp'] = self.adata.X
        save_sparse_file = None

    # Optionally cache a fast-loading copy next to the original file.
    if(save_sparse_file == 'p'):
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_sparse_data(path + new_sparse_file + '_sparse.p')
    elif(save_sparse_file == 'h5ad'):
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
    """Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
    Pickle file.

    Parameters
    ----------
    fname - string
        The filename of the output file.

    """
    # Stored transposed (genes x cells) and in CSC format so that
    # load_data's transpose-back yields CSR efficiently.
    data = self.adata_raw.X.T
    if data.getformat() == 'csr':
        data = data.tocsc()

    cell_names = np.array(list(self.adata_raw.obs_names))
    gene_names = np.array(list(self.adata_raw.var_names))
    # FIX: the original passed an unclosed 'open(fname, "wb")' handle to
    # pickle.dump, leaking the file descriptor; use a context manager.
    with open(fname, 'wb') as f:
        pickle.dump((data, cell_names, gene_names), f)
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
    """Saves `adata_raw` to a .h5ad file (AnnData's native file format).

    Parameters
    ----------
    fname - string
        The filename of the output file.

    data - string, optional, default 'adata_raw'
        Name of the instance attribute ('adata_raw' or 'adata') to write.

    **kwargs -
        Forwarded to AnnData.write_h5ad.
    """
    # Look up the requested AnnData object by attribute name and write it.
    target = self.__dict__[data]
    target.write_h5ad(fname, **kwargs)
def load_annotations(self, aname, sep=','):
    """Loads cell annotations.

    Loads the cell annoations specified by the 'aname' path.

    Parameters
    ----------
    aname - string
        The path to the annotations file. First column should be cell IDs
        and second column should be the desired annotations.

    """
    ann = pd.read_csv(aname)

    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))

    # Probe the file shape to decide whether it has an index column
    # and/or a header row, re-reading with the matching options.
    # NOTE(review): this assumes a row count mismatch means "header was
    # consumed as data" (or vice versa) -- confirm against input files.
    if(ann.shape[1] > 1):
        ann = pd.read_csv(aname, index_col=0, sep=sep)
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
    else:
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, header=None, sep=sep)

    # Force string index so lookups by cell name work.
    ann.index = np.array(list(ann.index.astype('<U100')))
    # ann1: annotations reordered to match the filtered cells;
    # ann2: annotations for all raw cells, in file order.
    ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
    ann2 = np.array(list(ann.values.flatten()))
    self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
    self.adata.obs['annotations'] = pd.Categorical(ann1)
def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
    """Computes the spatial dispersion factors for each gene.

    Parameters
    ----------
    nnm - scipy.sparse, float
        Square cell-to-cell nearest-neighbor matrix.

    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This ensures
        that outlier genes do not significantly skew the weight
        distribution.

    Returns:
    -------
    weights - ndarray, float
        The vector of gene weights (also stored in
        ``adata.var['weights']``).
    """
    # Average expression over each cell's neighborhood; knn_avg stores
    # the result in adata.layers['X_knn_avg'].
    self.knn_avg(nnm)

    D_avg = self.adata.layers['X_knn_avg']

    # Fano factor (variance/mean) of neighborhood-averaged expression.
    mu, var = sf.mean_variance_axis(D_avg, axis=0)

    dispersions = np.zeros(var.size)
    dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]

    self.adata.var['spatial_dispersions'] = dispersions.copy()

    # Clip at the mean of the top 'num_norm_avg' dispersions so outlier
    # genes do not dominate, then rescale to [0, 1] and take sqrt.
    ma = np.sort(dispersions)[-num_norm_avg:].mean()
    dispersions[dispersions >= ma] = ma

    weights = ((dispersions / dispersions.max())**0.5).flatten()

    self.adata.var['weights'] = weights

    return weights
def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
    """Computes the contribution of the gene IDs in 'genes' to each
    principal component (PC) of the filtered expression data as the mean of
    the absolute value of the corresponding gene loadings. High values
    correspond to PCs that are highly correlated with the features in
    'genes'. These PCs can then be regressed out of the data using
    'regress_genes'.

    Parameters
    ----------
    genes - numpy.array or list
        Genes for which contribution to each PC will be calculated.

    npcs - int, optional, default None
        How many PCs to calculate when computing PCA of the filtered and
        log-transformed expression data. If None, calculate all PCs.

    plot - bool, optional, default False
        If True, plot the scores reflecting how correlated each PC is with
        genes of interest. Otherwise, do not plot anything.

    Returns:
    -------
    x - numpy.array
        Scores reflecting how correlated each PC is with the genes of
        interest (ordered by decreasing eigenvalues). Returns None when
        'genes' is None (the PCA is still computed and cached).

    """
    from sklearn.decomposition import PCA
    if npcs is None:
        npcs = self.adata.X.shape[0]

    pca = PCA(n_components=npcs)
    pc = pca.fit_transform(self.adata.X.toarray())

    # Cache the fitted PCA and scores for use by 'regress_genes'.
    self.regression_pca = pca
    self.regression_pcs = pc

    gene_names = np.array(list(self.adata.var_names))
    if(genes is not None):
        idx = np.where(np.in1d(gene_names, genes))[0]
        sx = pca.components_[:, idx]
        # Mean absolute loading of the genes of interest on each PC.
        x = np.abs(sx).mean(1)

        if plot:
            plt.figure()
            plt.plot(x)

        return x
    else:
        return
def regress_genes(self, PCs):
    """Regress out the principal components in 'PCs' from the filtered
    expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
    been previously called.

    Parameters
    ----------
    PCs - int, numpy.array, list
        The principal components to regress out of the expression data.

    """

    ind = [PCs]
    ind = np.array(ind).flatten()
    # Prefer weight-scaled regression; fall back to unweighted when
    # adata.var['weights'] is unavailable (e.g. SAM.run not yet called).
    # The broad BaseException catch preserves that best-effort behavior.
    try:
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :] * self.adata.var[
                'weights'].values)
    except BaseException:
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :])

    self.adata.X = sp.csr_matrix(y)
def run(self,
        max_iter=10,
        verbose=True,
        projection='umap',
        stopping_condition=5e-3,
        num_norm_avg=50,
        k=20,
        distance='correlation',
        preprocessing='Normalizer',
        proj_kwargs={}):
    """Runs the Self-Assembling Manifold algorithm.

    Parameters
    ----------
    k - int, optional, default 20
        The number of nearest neighbors to identify for each cell.

    distance : string, optional, default 'correlation'
        The distance metric to use when constructing cell distance
        matrices. Can be any of the distance metrics supported by
        sklearn's 'pdist'.

    max_iter - int, optional, default 10
        The maximum number of iterations SAM will run.

    stopping_condition - float, optional, default 5e-3
        The stopping condition threshold for the RMSE between gene weights
        in adjacent iterations.

    verbose - bool, optional, default True
        If True, the iteration number and error between gene weights in
        adjacent iterations will be displayed.

    projection - str, optional, default 'umap'
        If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
        embedding. Otherwise, no embedding will be generated.

    preprocessing - str, optional, default 'Normalizer'
        If 'Normalizer', use sklearn.preprocessing.Normalizer, which
        normalizes expression data prior to PCA such that each cell has
        unit L2 norm. If 'StandardScaler', use
        sklearn.preprocessing.StandardScaler, which normalizes expression
        data prior to PCA such that each gene has zero mean and unit
        variance. Otherwise, do not normalize the expression data. We
        recommend using 'StandardScaler' for large datasets and
        'Normalizer' otherwise.

    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This prevents
        genes with large spatial dispersions from skewing the distribution
        of weights.

    proj_kwargs - dict, optional, default {}
        A dictionary of keyword arguments to pass to the projection
        functions.
    """

    self.distance = distance
    D = self.adata.X
    # Clamp k to [5, 100] and below the number of cells.
    self.k = k
    if(self.k < 5):
        self.k = 5
    elif(self.k > 100):
        self.k = 100

    if(self.k > D.shape[0] - 1):
        self.k = D.shape[0] - 2

    numcells = D.shape[0]

    # Larger datasets use fewer genes and fewer PCs (heuristic caps).
    n_genes = 8000
    if numcells > 3000 and n_genes > 3000:
        n_genes = 3000
    elif numcells > 2000 and n_genes > 4500:
        n_genes = 4500
    elif numcells > 1000 and n_genes > 6000:
        n_genes = 6000
    elif n_genes > 8000:
        n_genes = 8000

    npcs = None
    if npcs is None and numcells > 3000:
        npcs = 150
    elif npcs is None and numcells > 2000:
        npcs = 250
    elif npcs is None and numcells > 1000:
        npcs = 350
    elif npcs is None:
        npcs = 500

    tinit = time.time()

    # Seed with a random k-NN graph (each cell linked to itself plus
    # k-1 random cells) to get the initial gene weights.
    edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
    nums = np.arange(edm.shape[1])
    RINDS = np.random.randint(
        0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                       (self.k - 1)))
    RINDS = np.hstack((nums[:, None], RINDS))

    edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
    edm = edm.tocsr()

    print('RUNNING SAM')

    W = self.dispersion_ranking_NN(
        edm, num_norm_avg=1)

    old = np.zeros(W.size)
    new = W

    i = 0
    err = ((new - old)**2).mean()**0.5

    if max_iter < 5:
        max_iter = 5

    nnas = num_norm_avg

    # Keep the per-iteration graphs and weights for inspection.
    self.Ns=[edm]
    self.Ws = [W]

    # Iterate weights -> weighted PCA -> k-NN graph -> weights until
    # the weight vector converges (RMSE below 'stopping_condition').
    while (i < max_iter and err > stopping_condition):
        conv = err
        if(verbose):
            print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))

        i += 1
        old = new

        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        new = W
        err = ((new - old)**2).mean()**0.5

        self.Ns.append(EDM)
        self.Ws.append(W)

    # One final pass with the converged weights.
    W, wPCA_data, EDM, = self.calculate_nnm(
        D, W, n_genes, preprocessing, npcs, numcells, nnas)
    self.Ns.append(EDM)

    all_gene_names = np.array(list(self.adata.var_names))
    indices = np.argsort(-W)
    ranked_genes = all_gene_names[indices]

    self.corr_bin_genes(number_of_features=1000)

    self.adata.uns['ranked_genes'] = ranked_genes

    self.adata.obsm['X_pca'] = wPCA_data

    self.adata.uns['neighbors'] = {}
    self.adata.uns['neighbors']['connectivities'] = EDM

    if(projection == 'tsne'):
        print('Computing the t-SNE embedding...')
        self.run_tsne(**proj_kwargs)
    elif(projection == 'umap'):
        print('Computing the UMAP embedding...')
        self.run_umap(**proj_kwargs)
    elif(projection == 'diff_umap'):
        print('Computing the diffusion UMAP embedding...')
        self.run_diff_umap(**proj_kwargs)

    elapsed = time.time() - tinit

    if verbose:
        print('Elapsed time: ' + str(elapsed) + ' seconds')
def calculate_nnm(
        self,
        D,
        W,
        n_genes,
        preprocessing,
        npcs,
        numcells,
        num_norm_avg):
    """One SAM iteration: weight the expression data, run weighted PCA,
    build the k-NN graph, and recompute gene weights from it.

    Returns the tuple (W, g_weighted, EDM): updated gene weights, the
    weighted PCA coordinates, and the sparse nearest-neighbor matrix.
    Also caches 'pca_obj' and 'X_processed' in adata.uns.
    """
    # Keep only the top 'n_genes' weighted genes (column order preserved).
    if(n_genes is None):
        gkeep = np.arange(W.size)
    else:
        gkeep = np.sort(np.argsort(-W)[:n_genes])

    if preprocessing == 'Normalizer':
        Ds = D[:, gkeep].toarray()
        Ds = Normalizer().fit_transform(Ds)

    elif preprocessing == 'StandardScaler':
        Ds = D[:, gkeep].toarray()
        Ds = StandardScaler(with_mean=True).fit_transform(Ds)
        # Clip extreme z-scores.
        Ds[Ds > 5] = 5
        Ds[Ds < -5] = -5

    else:
        Ds = D[:, gkeep].toarray()

    # Rescale each gene by its current SAM weight.
    D_sub = Ds * (W[gkeep])

    # 'full' SVD solver is affordable only for small datasets.
    if numcells > 500:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='auto')
    else:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='full')

    if self.distance == 'euclidean':
        g_weighted = Normalizer().fit_transform(g_weighted)

    self.adata.uns['pca_obj'] = pca
    EDM = self.calc_nnm(g_weighted)

    W = self.dispersion_ranking_NN(
        EDM, num_norm_avg=num_norm_avg)

    self.adata.uns['X_processed'] = D_sub

    return W, g_weighted, EDM
def calc_nnm(self,g_weighted):
    """Builds the sparse k-nearest-neighbor adjacency matrix for the
    given PCA coordinates, using an approximate neighbor search for
    large datasets (> 8000 cells) and an exact pairwise-distance
    computation otherwise. Returns a CSR matrix with 1s marking each
    cell's k neighbors."""
    numcells=g_weighted.shape[0]
    if g_weighted.shape[0] > 8000:
        nnm, dists = ut.nearest_neighbors(
            g_weighted, n_neighbors=self.k, metric=self.distance)
        EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        # Scatter 1s at (row, neighbor) positions.
        EDM[np.tile(np.arange(nnm.shape[0])[:, None],
                    (1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
        EDM = EDM.tocsr()
    else:
        dist = ut.compute_distances(g_weighted, self.distance)
        nnm = ut.dist_to_nn(dist, self.k)
        EDM = sp.csr_matrix(nnm)
    return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
    """Plots orthogonal expression patterns.

    In the default mode, plots orthogonal gene expression patterns. A
    specific correlated group of genes can be specified to plot gene
    expression patterns within that group.

    Parameters
    ----------
    group - int, optional, default None
        If specified, display the genes within the desired correlated
        group. Otherwise, display the top ranked gene within each distinct
        correlated group.

    n_genes - int, optional, default 5
        The number of top ranked genes to display within a correlated
        group if 'group' is specified.

    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    gene_groups = self.adata.uns['gene_groups']
    if group is None:
        # One representative (top-ranked) gene per correlated group.
        for members in gene_groups:
            self.show_gene_expression(members[0], **kwargs)
    else:
        # The top 'n_genes' members of the requested group.
        for rank in range(n_genes):
            self.show_gene_expression(gene_groups[group][rank], **kwargs)
def plot_correlated_genes(
        self,
        name,
        n_genes=5,
        number_of_features=1000,
        **kwargs):
    """Plots gene expression patterns correlated with the input gene.

    Parameters
    ----------
    name - string
        The name of the gene with respect to which correlated gene
        expression patterns will be displayed.

    n_genes - int, optional, default 5
        The number of top ranked correlated genes to display.

    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    gene_names = np.array(list(self.adata.var_names))
    if (gene_names == name).sum() == 0:
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return
    correlated = self.corr_bin_genes(
        input_gene=name,
        number_of_features=number_of_features)
    # correlated[0] is the query gene itself; show the next 'n_genes'
    # entries, capped at however many are available.
    limit = min(n_genes + 1, correlated.size)
    for rank in range(1, limit):
        self.show_gene_expression(correlated[rank], **kwargs)
    return correlated[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
    """A (hacky) method for binning groups of genes correlated along the
    SAM manifold.

    Greedily grows "seed" bins: each gene is appended to the bin whose
    seed gene it is most correlated with (if above that gene's mean
    positive correlation), otherwise it starts a new bin.

    Parameters
    ----------
    number_of_features - int, optional, default None
        The number of genes to bin. Capped at 1000 due to memory
        considerations.
    input_gene - str, optional, default None
        If not None, use this gene as the first seed when growing the
        correlation bins. Only the bin containing this gene is returned
        and nothing is stored on ``adata``.

    Returns
    -------
    If ``input_gene`` is given: the gene names of the bin seeded by that
    gene. Otherwise: a list of gene-name arrays (one per bin), also
    stored in ``adata.uns['gene_groups']``.
    """
    weights = self.adata.var['spatial_dispersions'].values
    all_gene_names = np.array(list(self.adata.var_names))
    D_avg = self.adata.layers['X_knn_avg']

    # Consider genes in decreasing order of spatial dispersion;
    # zero-dispersion genes are dropped entirely.
    idx2 = np.argsort(-weights)[:weights[weights > 0].size]
    if(number_of_features is None or number_of_features > idx2.size):
        number_of_features = idx2.size
    if number_of_features > 1000:
        number_of_features = 1000

    if(input_gene is not None):
        input_gene = np.where(all_gene_names == input_gene)[0]
        if(input_gene.size == 0):
            print(
                "Gene note found in the filtered dataset. Note "
                "that genes are case sensitive.")
            return
        seeds = [np.array([input_gene])]
        # Pairwise correlations of the kNN-averaged expression profiles.
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            # Threshold: mean of this gene's positive correlations.
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                   > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                # Not similar enough to any existing seed: new bin.
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(all_gene_names[seeds[i]])
        # Only the bin grown from the query gene (always bin 0).
        return geneID_groups[0]
    else:
        # Same greedy procedure, seeded by the highest-dispersion gene.
        seeds = [np.array([idx2[0]])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                   > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(
                all_gene_names[seeds[i]])
        self.adata.uns['gene_groups'] = geneID_groups
        return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
    """Wrapper for sklearn's t-SNE implementation.

    See sklearn for the t-SNE documentation. All arguments are the same
    with the exception that 'metric' is set to 'precomputed' by default,
    implying that this function expects a distance matrix by default.
    """
    if X is not None:
        # Explicit input: embed it and hand the result back.
        return man.TSNE(metric=metric, **kwargs).fit_transform(X)
    # Default: embed the stored PCA coordinates using SAM's distance
    # metric and cache the result on the AnnData object.
    embedding = man.TSNE(
        metric=self.distance,
        **kwargs).fit_transform(self.adata.obsm['X_pca'])
    self.adata.obsm['X_tsne'] = embedding
def run_umap(self, X=None, metric=None, **kwargs):
    """Wrapper for umap-learn.

    See https://github.com/lmcinnes/umap sklearn for the documentation
    and source code.
    """
    import umap as umap
    if metric is None:
        # Default to SAM's configured distance metric.
        metric = self.distance
    reducer = umap.UMAP(metric=metric, **kwargs)
    if X is not None:
        # Explicit input: embed it and return the coordinates.
        return reducer.fit_transform(X)
    # Default: embed the stored PCA coordinates and cache the result.
    self.adata.obsm['X_umap'] = reducer.fit_transform(
        self.adata.obsm['X_pca'])
def run_diff_umap(self, use_rep='X_pca', metric='euclidean', n_comps=15,
                  method='gauss', **kwargs):
    """
    Experimental -- running UMAP on the diffusion components.

    Builds a kNN graph on ``use_rep``, computes diffusion components,
    rebuilds the graph in diffusion space, then embeds with UMAP.

    NOTE(review): the `metric` parameter and `**kwargs` are currently
    unused (the first neighbors call uses `self.distance`) -- confirm
    whether that is intentional.
    """
    import scanpy.api as sc
    sc.pp.neighbors(self.adata, use_rep=use_rep, n_neighbors=self.k,
                    metric=self.distance, method=method)
    sc.tl.diffmap(self.adata, n_comps=n_comps)
    # Re-derive the neighbor graph in diffusion space.
    sc.pp.neighbors(self.adata, use_rep='X_diffmap', n_neighbors=self.k,
                    metric='euclidean', method=method)
    # Preserve any existing SAM UMAP embedding before scanpy
    # overwrites 'X_umap'.
    if 'X_umap' in self.adata.obsm.keys():
        self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
    sc.tl.umap(self.adata, min_dist=0.1, copy=False)
def knn_avg(self, nnm=None):
    """Average the dispersion layer over each cell's k nearest neighbors.

    Stores the result in ``adata.layers['X_knn_avg']``. If ``nnm`` is
    None, the stored neighbor connectivity matrix is used.
    """
    if nnm is None:
        nnm = self.adata.uns['neighbors']['connectivities']
    self.adata.layers['X_knn_avg'] = (nnm / self.k).dot(
        self.adata.layers['X_disp'])
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
            edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
    """Display a scatter plot.

    Displays a scatter plot using the SAM projection or another input
    projection with or without annotations.

    Parameters
    ----------
    projection - ndarray of floats or str, optional, default None
        An N x 2 matrix, where N is the number of data points. If None,
        use an existing SAM projection (UMAP preferred, then t-SNE).
        Can also be a key into ``adata.obsm`` (e.g. 'X_umap', 'X_tsne').
    c - ndarray or str, optional, default None
        Colors for each cell in the scatter plot. Can be a vector of
        floats or strings for cell annotations. Can also be a key
        for sam.adata.obs (i.e. 'louvain_clusters').
    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.
    cmap - string, optional, default 'rainbow'
        The colormap to use for the input color values.
    colorbar - bool, optional default True
        If True, display a colorbar indicating which values / annotations
        correspond to which color in the scatter plot.

    Keyword arguments -
        All other keyword arguments that can be passed into
        matplotlib.pyplot.scatter can be used.
    """
    if (not PLOTTING):
        print("matplotlib not installed!")
    else:
        # Resolve the 2-D coordinates to plot.
        if(isinstance(projection, str)):
            try:
                dt = self.adata.obsm[projection]
            except KeyError:
                print('Please create a projection first using run_umap or'
                      'run_tsne')
        elif(projection is None):
            # Prefer the UMAP embedding; fall back to t-SNE.
            try:
                dt = self.adata.obsm['X_umap']
            except KeyError:
                try:
                    dt = self.adata.obsm['X_tsne']
                except KeyError:
                    print("Please create either a t-SNE or UMAP projection"
                          "first.")
                    return
        else:
            dt = projection

        if(axes is None):
            plt.figure()
            axes = plt.gca()

        if(c is None):
            plt.scatter(dt[:, 0], dt[:, 1], s=s,
                        linewidth=linewidth, edgecolor=edgecolor, **kwargs)
        else:
            # A string `c` may name a column of adata.obs; if not, it is
            # passed through to matplotlib unchanged.
            if isinstance(c, str):
                try:
                    c = self.adata.obs[c].get_values()
                except KeyError:
                    0  # do nothing
            if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
               (isinstance(c, np.ndarray) or isinstance(c, list))):
                # Categorical string annotations: map to integer codes
                # and label the colorbar ticks with the original strings.
                i = ut.convert_annotations(c)
                ui, ai = np.unique(i, return_index=True)
                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                    cbar.ax.set_yticklabels(c[ai])
            else:
                # Numeric colors (or a scalar/color spec, for which the
                # colorbar is suppressed).
                if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                    colorbar = False
                i = c
                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
    """Display a gene's expressions.

    Displays a scatter plot using the SAM projection or another input
    projection with a particular gene's expressions overlaid.

    Parameters
    ----------
    gene - string
        a case-sensitive string indicating the gene expression pattern
        to display.
    avg - bool, optional, default True
        If True, the plots use the k-nearest-neighbor-averaged expression
        values to smooth out noisy expression patterns and improves
        visualization.
    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.
    **kwargs - all keyword arguments in 'SAM.scatter' are eligible.
    """
    all_gene_names = np.array(list(self.adata.var_names))
    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))

    idx = np.where(all_gene_names == gene)[0]
    name = gene
    if(idx.size == 0):
        # Bug fix: message previously read "Gene note found"; now
        # consistent with the wording used elsewhere in the class.
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return

    if(avg):
        a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
        if a.sum() == 0:
            # The averaged expression is all-zero; fall back to the raw
            # log-transformed counts restricted to the filtered cells.
            a = np.log2(self.adata_raw.X[np.in1d(
                all_cell_names, cell_names), :][:,
                idx].toarray().flatten() + 1)
    else:
        a = np.log2(self.adata_raw.X[np.in1d(
            all_cell_names, cell_names), :][:,
            idx].toarray().flatten() + 1)

    if axes is None:
        plt.figure()
        axes = plt.gca()

    self.scatter(c=a, axes=axes, **kwargs)
    axes.set_title(name)
def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
    """Cluster cells with DBSCAN, then assign noise points to clusters.

    If ``X`` is None, clusters the stored UMAP embedding and writes the
    labels to ``adata.obs['density_clusters']``; otherwise clusters the
    given coordinates and returns the labels.
    """
    from sklearn.cluster import DBSCAN
    if X is None:
        X = self.adata.obsm['X_umap']
        save = True
    else:
        save = False

    cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)

    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        # Reassign DBSCAN noise points (label -1) to the cluster that is
        # most represented among their k nearest clustered neighbors.
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
        # One-hot neighbor membership matrix: rows are noise points,
        # columns are clustered points.
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # Per-cluster neighbor counts for each noise point.
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['density_clusters'] = pd.Categorical(cl)
    else:
        return cl
def kmeans_clustering(self, numc, X=None, npcs=15):
    """Performs k-means clustering.

    Parameters
    ----------
    numc - int
        Number of clusters
    X - ndarray, optional, default None
        Coordinates to cluster. If None, cluster the PCA projection of
        the processed data and store labels in
        ``adata.obs['kmeans_clusters']``; otherwise return the labels.
    npcs - int, optional, default 15
        Number of principal components to use as input for k-means
        clustering (only used when X is None).
    """
    from sklearn.cluster import KMeans
    save = X is None
    if save:
        # Project the processed data onto the top `npcs` components.
        D_sub = self.adata.uns['X_processed']
        X = (D_sub - D_sub.mean(0)).dot(
            self.adata.uns['pca_obj'].components_[:npcs, :].T)
    labels = KMeans(n_clusters=numc).fit_predict(
        Normalizer().fit_transform(X))
    if save:
        self.adata.obs['kmeans_clusters'] = pd.Categorical(labels)
    else:
        return labels
def leiden_clustering(self, X=None, res=1):
    """Run scanpy's Leiden clustering.

    If ``X`` is None, clusters the stored neighbor graph and writes
    integer labels to ``adata.obs['leiden_clusters']``; otherwise uses
    ``X`` as the adjacency and writes to
    ``adata.obs['leiden_clusters_X']``.
    """
    import scanpy.api as sc
    if X is None:
        sc.tl.leiden(self.adata, resolution=res,
                     key_added='leiden_clusters')
        labels = self.adata.obs[
            'leiden_clusters'].get_values().astype('int')
        self.adata.obs['leiden_clusters'] = pd.Categorical(labels)
    else:
        sc.tl.leiden(self.adata, resolution=res, adjacency=X,
                     key_added='leiden_clusters_X')
        labels = self.adata.obs[
            'leiden_clusters_X'].get_values().astype('int')
        self.adata.obs['leiden_clusters_X'] = pd.Categorical(labels)
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    """Cluster cells with HDBSCAN, then assign noise points to clusters.

    If ``X`` is None, clusters a normalized 15-component PCA projection
    of the processed data and writes labels to
    ``adata.obs['hdbknn_clusters']``; otherwise clusters ``X`` and
    returns the labels.
    """
    import hdbscan
    if X is None:
        # X = self.adata.obsm['X_pca']
        D = self.adata.uns['X_processed']
        X = (D - D.mean(0)).dot(
            self.adata.uns['pca_obj'].components_.T)[:, :15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False

    if k is None:
        k = 20  # self.k

    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)

    cl = hdb.fit_predict(X)

    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        # Reassign HDBSCAN noise points (label -1) to the cluster most
        # represented among their k nearest clustered neighbors.
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
                             n_genes=4000):
    """
    Ranks marker genes for each cluster using a random forest
    classification approach.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    clusters - int or array-like, default None
        A number or vector corresponding to the specific cluster ID(s)
        for which marker genes will be calculated. If None, marker genes
        will be computed for all clusters.
    n_genes - int, optional, default 4000
        By default, trains the classifier on the top 4000 SAM-weighted
        genes.

    Returns
    -------
    markers - dict mapping each cluster label to its ranked gene array.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    from sklearn.ensemble import RandomForestClassifier

    markers = {}
    # Bug fix: `clusters == None` is an element-wise comparison for
    # array inputs (and deprecated for scalars); use an identity check,
    # consistent with the `clusters is None` check below.
    if clusters is None:
        lblsu = np.unique(lbls)
    else:
        lblsu = np.unique(clusters)

    # Restrict the classifier to the top `n_genes` SAM-weighted genes.
    indices = np.argsort(-self.adata.var['weights'].values)
    X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
    for K in range(lblsu.size):
        print(K)  # progress indicator: one classifier per cluster
        # One-vs-rest target for the current cluster.
        y = np.zeros(lbls.size)
        y[lbls == lblsu[K]] = 1
        clf = RandomForestClassifier(n_estimators=100, max_depth=None,
                                     random_state=0)
        clf.fit(X, y)
        # Rank genes by feature importance for this cluster.
        idx = np.argsort(-clf.feature_importances_)
        markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
    if clusters is None:
        self.adata.uns['marker_genes_rf'] = markers
    return markers
def identify_marker_genes_ratio(self, labels=None):
    """
    Ranks marker genes for each cluster using a SAM-weighted
    expression-ratio approach (works quite well).

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    Returns
    -------
    markers - dict mapping each cluster label to its ranked gene array;
        also stored in ``adata.uns['marker_genes_ratio']``.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    all_gene_names = np.array(list(self.adata.var_names))

    markers = {}
    # Total expression per gene across all cells.
    s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
    lblsu = np.unique(lbls)
    for i in lblsu:
        # In-cluster expression per gene.
        d = np.array(self.adata.layers['X_disp']
                     [lbls == i, :].sum(0)).flatten()
        rat = np.zeros(d.size)
        # Score = (in-cluster fraction) * (in-cluster total) * SAM weight.
        rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
            self.adata.var['weights'].values[s > 0]
        x = np.argsort(-rat)
        markers[i] = all_gene_names[x[:]]

    self.adata.uns['marker_genes_ratio'] = markers

    return markers
def identify_marker_genes_corr(self, labels=None, n_genes=4000):
    """
    Ranking marker genes based on their respective magnitudes in the
    correlation dot products with cluster-specific reference expression
    profiles.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    n_genes - int, optional, default 4000
        By default, computes correlations on the top 4000 SAM-weighted
        genes.

    Returns
    -------
    markers - dict mapping each cluster label to its ranked gene array;
        also stored in ``adata.uns['marker_genes_corr']``.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    w = self.adata.var['weights'].values
    s = StandardScaler()
    # Standardize the top `n_genes` SAM-weighted genes and re-weight.
    idxg = np.argsort(-w)[:n_genes]
    y1 = s.fit_transform(self.adata.layers['X_disp'][:, idxg].A) * w[idxg]

    all_gene_names = np.array(list(self.adata.var_names))[idxg]
    markers = {}
    lblsu = np.unique(lbls)
    for i in lblsu:
        Gcells = np.array(list(self.adata.obs_names[lbls == i]))
        z1 = y1[np.in1d(self.adata.obs_names, Gcells), :]
        # Per-cell z-scored profiles correlated against the cluster's
        # z-scored mean profile.
        m1 = (z1 - z1.mean(1)[:, None]) / z1.std(1)[:, None]
        ref = z1.mean(0)
        ref = (ref - ref.mean()) / ref.std()
        g2 = (m1 * ref).mean(0)
        markers[i] = all_gene_names[np.argsort(-g2)]

    self.adata.uns['marker_genes_corr'] = markers
    return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.kmeans_clustering | python | def kmeans_clustering(self, numc, X=None, npcs=15):
from sklearn.cluster import KMeans
if X is None:
D_sub = self.adata.uns['X_processed']
X = (
D_sub -
D_sub.mean(0)).dot(
self.adata.uns['pca_obj'].components_[
:npcs,
:].T)
save = True
else:
save = False
cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
if save:
self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
else:
return cl | Performs k-means clustering.
Parameters
----------
numc - int
Number of clusters
npcs - int, optional, default 15
Number of principal components to use as inpute for k-means
clustering. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1318-L1350 | null | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
    # Accept (matrix, gene IDs, cell IDs), a DataFrame, or an AnnData
    # object; build `adata_raw` and a working copy `adata`.
    if isinstance(counts, tuple) or isinstance(counts, list):
        raw_data, all_gene_names, all_cell_names = counts
        if isinstance(raw_data, np.ndarray):
            # Keep data sparse internally.
            raw_data = sp.csr_matrix(raw_data)
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, pd.DataFrame):
        # DataFrame is assumed to be (cells x genes).
        raw_data = sp.csr_matrix(counts.values)
        all_gene_names = np.array(list(counts.columns.values))
        all_cell_names = np.array(list(counts.index.values))
        self.adata_raw = AnnData(
            X=raw_data, obs={
                'obs_names': all_cell_names}, var={
                'var_names': all_gene_names})
    elif isinstance(counts, AnnData):
        all_cell_names = np.array(list(counts.obs_names))
        all_gene_names = np.array(list(counts.var_names))
        self.adata_raw = counts
    elif counts is not None:
        raise Exception(
            "\'counts\' must be either a tuple/list of "
            "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
            "cells x genes")

    if(annotations is not None):
        annotations = np.array(list(annotations))
        if counts is not None:
            self.adata_raw.obs['annotations'] = pd.Categorical(annotations)

    if(counts is not None):
        # De-duplicate IDs so AnnData indexing stays unambiguous.
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                    include_genes=None, exclude_genes=None,
                    include_cells=None, exclude_cells=None,
                    norm='log', min_expression=1, thresh=0.01,
                    filter_genes=True):
    """Log-normalizes and filters the expression data.

    Parameters
    ----------
    div : float, optional, default 1
        The factor by which the gene expression will be divided prior to
        log normalization.
    downsample : float, optional, default 0
        The factor by which to randomly downsample the data. If 0, the
        data will not be downsampled.
    sum_norm : str or float, optional, default None
        If a float, the total number of transcripts in each cell will be
        normalized to this value prior to normalization and filtering.
        Otherwise, nothing happens. If 'cell_median', each cell is
        normalized to have the median total read count per cell. If
        'gene_median', each gene is normalized to have the median total
        read count per gene.
    norm : str, optional, default 'log'
        If 'log', log-normalizes the expression data. If 'ftt', applies
        the Freeman-Tukey variance-stabilization transformation. If
        'multinomial', applies the Pearson-residual transformation (this
        is experimental and should only be used for raw, un-normalized
        UMI datasets). If None, the data is not normalized.
    include_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to
        keep. All other genes will be filtered out. Gene names are
        case-sensitive.
    exclude_genes : array-like of string, optional, default None
        A vector of gene names or indices that specifies the genes to
        exclude. These genes will be filtered out. Gene names are
        case-sensitive.
    include_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to keep.
        All other cells will be filtered out. Cell names are
        case-sensitive.
    exclude_cells : array-like of string, optional, default None
        A vector of cell names that specifies the cells to exclude.
        These cells will be filtered out. Cell names are case-sensitive.
    min_expression : float, optional, default 1
        The threshold above which a gene is considered
        expressed. Gene expression values less than 'min_expression' are
        set to zero.
    thresh : float, optional, default 0.01
        Keep genes expressed in greater than 'thresh'*100 % of cells and
        less than (1-'thresh')*100 % of cells, where a gene is considered
        expressed if its expression value exceeds 'min_expression'.
    filter_genes : bool, optional, default True
        Setting this to False turns off filtering operations aside from
        removing genes with zero expression across all cells. Genes
        passed in exclude_genes or not passed in include_genes will still
        be filtered.
    """
    # load data
    try:
        D = self.adata_raw.X
        self.adata = self.adata_raw.copy()
    except AttributeError:
        print('No data loaded')
    # filter cells
    cell_names = np.array(list(self.adata_raw.obs_names))
    idx_cells = np.arange(D.shape[0])
    if(include_cells is not None):
        include_cells = np.array(list(include_cells))
        idx2 = np.where(np.in1d(cell_names, include_cells))[0]
        idx_cells = np.array(list(set(idx2) & set(idx_cells)))
    if(exclude_cells is not None):
        exclude_cells = np.array(list(exclude_cells))
        idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                invert=True))[0]
        idx_cells = np.array(list(set(idx4) & set(idx_cells)))
    if downsample > 0:
        numcells = int(D.shape[0] / downsample)
        rand_ind = np.random.choice(np.arange(D.shape[0]),
                                    size=numcells, replace=False)
        idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
    else:
        numcells = D.shape[0]

    mask_cells = np.zeros(D.shape[0], dtype='bool')
    mask_cells[idx_cells] = True
    self.adata = self.adata_raw[mask_cells, :].copy()
    D = self.adata.X

    # Work with a float32 CSR matrix from here on.
    if isinstance(D, np.ndarray):
        D = sp.csr_matrix(D, dtype='float32')
    else:
        D = D.astype('float32')
        D.sort_indices()

    if(D.getformat() == 'csc'):
        D = D.tocsr();

    # sum-normalize
    if (sum_norm == 'cell_median' and norm != 'multinomial'):
        s = D.sum(1).A.flatten()
        sum_norm = np.median(s)
        D = D.multiply(1 / s[:, None] * sum_norm).tocsr()
    elif (sum_norm == 'gene_median' and norm != 'multinomial'):
        s = D.sum(0).A.flatten()
        sum_norm = np.median(s)
        # Avoid division by zero for all-zero genes.
        s[s == 0] = 1
        D = D.multiply(1 / s[None, :] * sum_norm).tocsr()
    elif sum_norm is not None and norm != 'multinomial':
        D = D.multiply(1 / D.sum(1).A.flatten()[:,
                                                None] * sum_norm).tocsr()

    # normalize
    self.adata.X = D
    if norm is None:
        D.data[:] = (D.data / div)
    elif(norm.lower() == 'log'):
        D.data[:] = np.log2(D.data / div + 1)
    elif(norm.lower() == 'ftt'):
        # Freeman-Tukey variance-stabilizing transform.
        D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
    elif norm.lower() == 'multinomial':
        # Pearson-residual transform (experimental; expects raw UMIs).
        ni = D.sum(1).A.flatten()  # cells
        pj = (D.sum(0) / D.sum()).A.flatten()  # genes
        col = D.indices
        row = []
        for i in range(D.shape[0]):
            row.append(i * np.ones(D.indptr[i + 1] - D.indptr[i]))
        row = np.concatenate(row).astype('int32')
        # Expected counts under the multinomial model.
        mu = sp.coo_matrix((ni[row] * pj[col], (row, col))).tocsr()
        mu2 = mu.copy()
        mu2.data[:] = mu2.data**2
        mu2 = mu2.multiply(1 / ni[:, None])
        mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)

        self.adata.X = mu
        if sum_norm is None:
            sum_norm = np.median(ni)
        # D keeps a log-normalized copy for the dispersion layer.
        D = D.multiply(1 / ni[:, None] * sum_norm).tocsr()
        D.data[:] = np.log2(D.data / div + 1)
    else:
        D.data[:] = (D.data / div)

    # zero-out low-expressed genes
    idx = np.where(D.data <= min_expression)[0]
    D.data[idx] = 0

    # filter genes
    gene_names = np.array(list(self.adata.var_names))
    idx_genes = np.arange(D.shape[1])
    if(include_genes is not None):
        include_genes = np.array(list(include_genes))
        idx = np.where(np.in1d(gene_names, include_genes))[0]
        idx_genes = np.array(list(set(idx) & set(idx_genes)))
    if(exclude_genes is not None):
        exclude_genes = np.array(list(exclude_genes))
        idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                invert=True))[0]
        idx_genes = np.array(list(set(idx3) & set(idx_genes)))
    if(filter_genes):
        # Keep genes expressed in (thresh, 1 - thresh] of cells.
        a, ct = np.unique(D.indices, return_counts=True)
        c = np.zeros(D.shape[1])
        c[a] = ct
        keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                       c / D.shape[0] <= 1 - thresh))[0]
        idx_genes = np.array(list(set(keep) & set(idx_genes)))

    mask_genes = np.zeros(D.shape[1], dtype='bool')
    mask_genes[idx_genes] = True

    # Zero out filtered genes rather than slicing so shapes stay stable.
    self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
    self.adata.X.eliminate_zeros()
    self.adata.var['mask_genes'] = mask_genes

    if norm == 'multinomial':
        self.adata.layers['X_disp'] = D.multiply(
            mask_genes[None, :]).tocsr()
        self.adata.layers['X_disp'].eliminate_zeros()
    else:
        self.adata.layers['X_disp'] = self.adata.X
def load_data(self, filename, transpose=True,
              save_sparse_file='h5ad', sep=',', **kwargs):
    """Loads the specified data file. The file can be a table of
    read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
    as columns by default. The file can also be a pickle file (output
    from 'save_sparse_data') or an h5ad file (output from
    'save_anndata').

    Parameters
    ----------
    filename - string
        The path to the tabular raw expression counts file.
    sep - string, optional, default ','
        The delimeter used to read the input data table. By default
        assumes the input table is delimited by commas.
    save_sparse_file - str, optional, default 'h5ad'
        If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
        (the native AnnData file format) to the same folder as the
        original data for faster loading in the future. If 'p', pickles
        the sparse data structure, cell names, and gene names in the
        same folder as the original data for faster loading in the
        future.
    transpose - bool, optional, default True
        By default, assumes file is (genes x cells). Set this to False
        if the file has dimensions (cells x genes).
    """
    if filename.split('.')[-1] == 'p':
        # Pickled (data, cell names, gene names) tuple.
        raw_data, all_cell_names, all_gene_names = (
            pickle.load(open(filename, 'rb')))

        if(transpose):
            raw_data = raw_data.T
            if raw_data.getformat() == 'csc':
                print("Converting sparse matrix to csr format...")
                raw_data = raw_data.tocsr()
        # Already a fast-loading format; do not re-save.
        save_sparse_file = None
    elif filename.split('.')[-1] != 'h5ad':
        # Delimited text table.
        df = pd.read_csv(filename, sep=sep, index_col=0)
        if(transpose):
            dataset = df.T
        else:
            dataset = df
        raw_data = sp.csr_matrix(dataset.values)
        all_cell_names = np.array(list(dataset.index.values))
        all_gene_names = np.array(list(dataset.columns.values))

    if filename.split('.')[-1] != 'h5ad':
        self.adata_raw = AnnData(X=raw_data,
                                 obs={'obs_names': all_cell_names},
                                 var={'var_names': all_gene_names})
        # De-duplicate IDs so AnnData indexing stays unambiguous.
        if(np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if(np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        self.adata.layers['X_disp'] = raw_data
    else:
        # Native AnnData file.
        self.adata_raw = anndata.read_h5ad(filename, **kwargs)
        self.adata = self.adata_raw.copy()
        if 'X_disp' not in list(self.adata.layers.keys()):
            self.adata.layers['X_disp'] = self.adata.X
        save_sparse_file = None

    # Optionally cache a fast-loading copy next to the original file.
    if(save_sparse_file == 'p'):
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_sparse_data(path + new_sparse_file + '_sparse.p')
    elif(save_sparse_file == 'h5ad'):
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
    """Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
    Pickle file.

    Parameters
    ----------
    fname - string
        The filename of the output file.
    """
    data = self.adata_raw.X.T
    if data.getformat() == 'csr':
        data = data.tocsc()
    cell_names = np.array(list(self.adata_raw.obs_names))
    gene_names = np.array(list(self.adata_raw.var_names))
    # Bug fix: the file handle was previously opened inline and never
    # closed; a with-block guarantees it is flushed and released.
    with open(fname, 'wb') as f:
        pickle.dump((data, cell_names, gene_names), f)
def save_anndata(self, fname, data='adata_raw', **kwargs):
    """Saves an AnnData attribute to a .h5ad file (AnnData's native
    file format).

    Parameters
    ----------
    fname - string
        The filename of the output file.
    data - string, optional, default 'adata_raw'
        Name of the attribute to save ('adata_raw' or 'adata').
    """
    target = self.__dict__[data]
    target.write_h5ad(fname, **kwargs)
def load_annotations(self, aname, sep=','):
    """Loads cell annotations.

    Loads the cell annotations specified by the 'aname' path and stores
    them in ``adata_raw.obs['annotations']`` (all cells) and
    ``adata.obs['annotations']`` (filtered cells).

    Parameters
    ----------
    aname - string
        The path to the annotations file. First column should be cell
        IDs and second column should be the desired annotations.
    sep - string, optional, default ','
        Field delimiter of the annotations file.
    """
    ann = pd.read_csv(aname)

    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))

    # Re-read with the header/index layout that matches the number of
    # cells in the raw data.
    if(ann.shape[1] > 1):
        ann = pd.read_csv(aname, index_col=0, sep=sep)
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
    else:
        if(ann.shape[0] != all_cell_names.size):
            ann = pd.read_csv(aname, header=None, sep=sep)
    ann.index = np.array(list(ann.index.astype('<U100')))
    # Align annotations to the filtered cell order for `adata`.
    ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
    ann2 = np.array(list(ann.values.flatten()))
    self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
    self.adata.obs['annotations'] = pd.Categorical(ann1)
def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
    """Computes the spatial dispersion factors for each gene.

    Parameters
    ----------
    nnm - scipy.sparse, float
        Square cell-to-cell nearest-neighbor matrix.

    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This ensures
        that outlier genes do not significantly skew the weight
        distribution.

    Returns:
    -------
    weights - ndarray, float
        The vector of gene weights (also stored in
        ``adata.var['weights']``).
    """
    self.knn_avg(nnm)
    D_avg = self.adata.layers['X_knn_avg']

    # Fano-factor-style dispersion (variance / mean) of the
    # kNN-averaged expression.
    mu, var = sf.mean_variance_axis(D_avg, axis=0)

    dispersions = np.zeros(var.size)
    dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]

    self.adata.var['spatial_dispersions'] = dispersions.copy()

    # Clip at the mean of the top `num_norm_avg` dispersions so a few
    # outlier genes cannot dominate the weights.
    ma = np.sort(dispersions)[-num_norm_avg:].mean()
    dispersions[dispersions >= ma] = ma

    weights = ((dispersions / dispersions.max())**0.5).flatten()

    self.adata.var['weights'] = weights

    return weights
def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
    """Computes the contribution of the gene IDs in 'genes' to each
    principal component (PC) of the filtered expression data as the mean
    of the absolute value of the corresponding gene loadings. High
    values correspond to PCs that are highly correlated with the
    features in 'genes'. These PCs can then be regressed out of the data
    using 'regress_genes'.

    Parameters
    ----------
    genes - numpy.array or list
        Genes for which contribution to each PC will be calculated.

    npcs - int, optional, default None
        How many PCs to calculate when computing PCA of the filtered and
        log-transformed expression data. If None, calculate all PCs.

    plot - bool, optional, default False
        If True, plot the scores reflecting how correlated each PC is
        with genes of interest. Otherwise, do not plot anything.

    Returns:
    -------
    x - numpy.array
        Scores reflecting how correlated each PC is with the genes of
        interest (ordered by decreasing eigenvalues). None if `genes`
        is not provided (the fitted PCA is still stored on `self`).
    """
    from sklearn.decomposition import PCA
    if npcs is None:
        npcs = self.adata.X.shape[0]

    pca = PCA(n_components=npcs)
    pc = pca.fit_transform(self.adata.X.toarray())

    # Cache the fit for later use by `regress_genes`.
    self.regression_pca = pca
    self.regression_pcs = pc

    gene_names = np.array(list(self.adata.var_names))
    if(genes is not None):
        idx = np.where(np.in1d(gene_names, genes))[0]
        sx = pca.components_[:, idx]
        # Mean absolute loading of the genes of interest per PC.
        x = np.abs(sx).mean(1)

        if plot:
            plt.figure()
            plt.plot(x)

        return x
    else:
        return
def regress_genes(self, PCs):
    """Regress out the principal components in 'PCs' from the filtered
    expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
    been previously called.

    Parameters
    ----------
    PCs - int, numpy.array, list
        The principal components to regress out of the expression data.
    """
    # Normalize the input to a flat index array (accepts int/list/array).
    ind = np.array([PCs]).flatten()
    try:
        # Weighted reconstruction: scale loadings by the SAM gene weights
        # (adata.var['weights']) before subtracting.
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :] * self.adata.var[
                'weights'].values)
    except Exception:
        # Fall back to unweighted regression when the 'weights' column is
        # missing or incompatible. (Was 'except BaseException', which also
        # swallowed KeyboardInterrupt/SystemExit.)
        y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
            self.regression_pca.components_[ind, :])
    self.adata.X = sp.csr_matrix(y)
def run(self,
        max_iter=10,
        verbose=True,
        projection='umap',
        stopping_condition=5e-3,
        num_norm_avg=50,
        k=20,
        distance='correlation',
        preprocessing='Normalizer',
        proj_kwargs=None):
    """Runs the Self-Assembling Manifold algorithm.

    Parameters
    ----------
    k - int, optional, default 20
        The number of nearest neighbors to identify for each cell.

    distance : string, optional, default 'correlation'
        The distance metric to use when constructing cell distance
        matrices. Can be any of the distance metrics supported by
        sklearn's 'pdist'.

    max_iter - int, optional, default 10
        The maximum number of iterations SAM will run.

    stopping_condition - float, optional, default 5e-3
        The stopping condition threshold for the RMSE between gene weights
        in adjacent iterations.

    verbose - bool, optional, default True
        If True, the iteration number and error between gene weights in
        adjacent iterations will be displayed.

    projection - str, optional, default 'umap'
        If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
        embedding. Otherwise, no embedding will be generated.

    preprocessing - str, optional, default 'Normalizer'
        If 'Normalizer', use sklearn.preprocessing.Normalizer, which
        normalizes expression data prior to PCA such that each cell has
        unit L2 norm. If 'StandardScaler', use
        sklearn.preprocessing.StandardScaler, which normalizes expression
        data prior to PCA such that each gene has zero mean and unit
        variance. Otherwise, do not normalize the expression data. We
        recommend using 'StandardScaler' for large datasets and
        'Normalizer' otherwise.

    num_norm_avg - int, optional, default 50
        The top 'num_norm_avg' dispersions are averaged to determine the
        normalization factor when calculating the weights. This prevents
        genes with large spatial dispersions from skewing the distribution
        of weights.

    proj_kwargs - dict, optional, default None
        A dictionary of keyword arguments to pass to the projection
        functions. None is equivalent to an empty dict.
    """
    # Avoid the shared mutable-default pitfall ('proj_kwargs={}').
    if proj_kwargs is None:
        proj_kwargs = {}
    self.distance = distance
    D = self.adata.X
    # Clamp k to [5, 100] and to the dataset size.
    self.k = k
    if self.k < 5:
        self.k = 5
    elif self.k > 100:
        self.k = 100
    if self.k > D.shape[0] - 1:
        self.k = D.shape[0] - 2
    numcells = D.shape[0]
    # Cap the number of genes retained per iteration by dataset size.
    n_genes = 8000
    if numcells > 3000 and n_genes > 3000:
        n_genes = 3000
    elif numcells > 2000 and n_genes > 4500:
        n_genes = 4500
    elif numcells > 1000 and n_genes > 6000:
        n_genes = 6000
    elif n_genes > 8000:
        n_genes = 8000
    # Number of PCs, also scaled down for larger datasets.
    if numcells > 3000:
        npcs = 150
    elif numcells > 2000:
        npcs = 250
    elif numcells > 1000:
        npcs = 350
    else:
        npcs = 500
    tinit = time.time()
    # Seed with a random kNN graph: each cell linked to itself plus
    # k-1 randomly chosen cells.
    edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
    nums = np.arange(edm.shape[1])
    RINDS = np.random.randint(
        0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                       (self.k - 1)))
    RINDS = np.hstack((nums[:, None], RINDS))
    edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
    edm = edm.tocsr()
    print('RUNNING SAM')
    # Initial weights from the random graph; num_norm_avg=1 means no
    # dispersion clipping on the first pass.
    W = self.dispersion_ranking_NN(edm, num_norm_avg=1)
    old = np.zeros(W.size)
    new = W
    i = 0
    err = ((new - old)**2).mean()**0.5
    if max_iter < 5:
        max_iter = 5
    nnas = num_norm_avg
    self.Ns = [edm]
    self.Ws = [W]
    # Iterate weights -> weighted PCA -> kNN graph -> new weights until
    # the RMSE between successive weight vectors drops below threshold.
    while (i < max_iter and err > stopping_condition):
        conv = err
        if verbose:
            print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))
        i += 1
        old = new
        W, wPCA_data, EDM = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        new = W
        err = ((new - old)**2).mean()**0.5
        self.Ns.append(EDM)
        self.Ws.append(W)
    # One final refinement pass after convergence.
    W, wPCA_data, EDM = self.calculate_nnm(
        D, W, n_genes, preprocessing, npcs, numcells, nnas)
    self.Ns.append(EDM)
    all_gene_names = np.array(list(self.adata.var_names))
    indices = np.argsort(-W)
    ranked_genes = all_gene_names[indices]
    self.corr_bin_genes(number_of_features=1000)
    self.adata.uns['ranked_genes'] = ranked_genes
    self.adata.obsm['X_pca'] = wPCA_data
    self.adata.uns['neighbors'] = {}
    self.adata.uns['neighbors']['connectivities'] = EDM
    if projection == 'tsne':
        print('Computing the t-SNE embedding...')
        self.run_tsne(**proj_kwargs)
    elif projection == 'umap':
        print('Computing the UMAP embedding...')
        self.run_umap(**proj_kwargs)
    elif projection == 'diff_umap':
        print('Computing the diffusion UMAP embedding...')
        self.run_diff_umap(**proj_kwargs)
    elapsed = time.time() - tinit
    if verbose:
        print('Elapsed time: ' + str(elapsed) + ' seconds')
def calculate_nnm(
        self,
        D,
        W,
        n_genes,
        preprocessing,
        npcs,
        numcells,
        num_norm_avg):
    """One SAM refinement step: select top-weighted genes, rescale the
    expression, run weighted PCA, rebuild the kNN graph, and recompute
    gene weights. Returns (weights, weighted PCA coordinates, kNN graph).
    """
    # Keep the n_genes highest-weight genes (all genes if n_genes is None).
    if(n_genes is None):
        gkeep = np.arange(W.size)
    else:
        gkeep = np.sort(np.argsort(-W)[:n_genes])
    if preprocessing == 'Normalizer':
        Ds = D[:, gkeep].toarray()
        # Each cell scaled to unit L2 norm.
        Ds = Normalizer().fit_transform(Ds)
    elif preprocessing == 'StandardScaler':
        Ds = D[:, gkeep].toarray()
        # Each gene scaled to zero mean / unit variance, then clipped to
        # +/-5 to bound the influence of extreme values.
        Ds = StandardScaler(with_mean=True).fit_transform(Ds)
        Ds[Ds > 5] = 5
        Ds[Ds < -5] = -5
    else:
        Ds = D[:, gkeep].toarray()
    # Scale each gene column by its current SAM weight.
    D_sub = Ds * (W[gkeep])
    # Full SVD solver for small datasets, auto otherwise.
    if numcells > 500:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='auto')
    else:
        g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
            npcs, min(D.shape)), do_weight=True, solver='full')
    if self.distance == 'euclidean':
        # Unit-norm rows make euclidean distance behave like cosine.
        g_weighted = Normalizer().fit_transform(g_weighted)
    self.adata.uns['pca_obj'] = pca
    EDM = self.calc_nnm(g_weighted)
    # Recompute gene weights on the new graph.
    W = self.dispersion_ranking_NN(
        EDM, num_norm_avg=num_norm_avg)
    self.adata.uns['X_processed'] = D_sub
    return W, g_weighted, EDM
def calc_nnm(self, g_weighted):
    """Build a binary kNN adjacency matrix (CSR) from the weighted PCA
    coordinates, using an approximate search for large datasets and a
    dense pairwise-distance computation otherwise.
    """
    n_obs = g_weighted.shape[0]
    if n_obs > 8000:
        # Approximate nearest neighbors for big inputs; distances unused.
        nbr_idx, _ = ut.nearest_neighbors(
            g_weighted, n_neighbors=self.k, metric=self.distance)
        adj = sp.coo_matrix((n_obs, n_obs), dtype='i').tolil()
        row_idx = np.repeat(np.arange(nbr_idx.shape[0]), nbr_idx.shape[1])
        adj[row_idx, nbr_idx.flatten()] = 1
        adj = adj.tocsr()
    else:
        # Exact pairwise distances -> binary kNN matrix.
        pairwise = ut.compute_distances(g_weighted, self.distance)
        adj = sp.csr_matrix(ut.dist_to_nn(pairwise, self.k))
    return adj
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
    """Plots orthogonal expression patterns.

    In the default mode, plots the expression pattern of the top-ranked
    gene from each distinct correlated group. If a specific group is
    given, plots the top 'n_genes' genes within that group.

    Parameters
    ----------
    group - int, optional, default None
        If specified, display the genes within the desired correlated
        group. Otherwise, display the top ranked gene within each distinct
        correlated group.

    n_genes - int, optional, default 5
        The number of top ranked genes to display within a correlated
        group if 'group' is specified.

    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    groups = self.adata.uns['gene_groups']
    if group is None:
        # One representative (top-ranked) gene per correlated group.
        for gene_group in groups:
            self.show_gene_expression(gene_group[0], **kwargs)
    else:
        for j in range(n_genes):
            self.show_gene_expression(groups[group][j], **kwargs)
def plot_correlated_genes(
        self,
        name,
        n_genes=5,
        number_of_features=1000,
        **kwargs):
    """Plots gene expression patterns correlated with the input gene.

    Parameters
    ----------
    name - string
        The name of the gene with respect to which correlated gene
        expression patterns will be displayed.

    n_genes - int, optional, default 5
        The number of top ranked correlated genes to display.

    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    gene_names = np.array(list(self.adata.var_names))
    if (gene_names == name).sum() == 0:
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return
    # Genes binned with the query gene, best-correlated first.
    sds = self.corr_bin_genes(
        input_gene=name,
        number_of_features=number_of_features)
    # Skip index 0 (the query gene itself); cap at the bin size.
    upper = min(n_genes + 1, sds.size)
    for j in range(1, upper):
        self.show_gene_expression(sds[j], **kwargs)
    return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
    """A (hacky) method for binning groups of genes correlated along the
    SAM manifold.

    Greedily grows "seed" bins: each gene (in decreasing dispersion order)
    joins the existing bin whose seed gene it is most correlated with,
    provided that correlation exceeds the gene's mean positive
    correlation; otherwise it starts a new bin.

    Parameters
    ----------
    number_of_features - int, optional, default None
        The number of genes to bin. Capped at 1000 due to memory
        considerations.

    input_gene - str, optional, default None
        If not None, use this gene as the first seed when growing the
        correlation bins, and return only the bin containing it.
    """
    weights = self.adata.var['spatial_dispersions'].values
    all_gene_names = np.array(list(self.adata.var_names))
    D_avg = self.adata.layers['X_knn_avg']
    # Candidate genes: positive-dispersion genes, highest first.
    idx2 = np.argsort(-weights)[:weights[weights > 0].size]
    if(number_of_features is None or number_of_features > idx2.size):
        number_of_features = idx2.size
    if number_of_features > 1000:
        number_of_features = 1000
    if(input_gene is not None):
        input_gene = np.where(all_gene_names == input_gene)[0]
        if(input_gene.size == 0):
            # Fixed typo in the user-facing message ("note" -> "not").
            print(
                "Gene not found in the filtered dataset. Note "
                "that genes are case sensitive.")
            return
        seeds = [np.array([input_gene])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            # Threshold: mean of this gene's positive correlations.
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                   > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(all_gene_names[seeds[i]])
        # Only the bin seeded by the input gene is of interest here.
        return geneID_groups[0]
    else:
        seeds = [np.array([idx2[0]])]
        pw_corr = np.corrcoef(
            D_avg[:, idx2[:number_of_features]].T.toarray())
        for i in range(1, number_of_features):
            flag = False
            maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
            maxi = 0
            for j in range(len(seeds)):
                if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                   > maxd):
                    maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                    maxi = j
                    flag = True
            if(not flag):
                seeds.append(np.array([idx2[i]]))
            else:
                seeds[maxi] = np.append(seeds[maxi], idx2[i])
        geneID_groups = []
        for i in range(len(seeds)):
            geneID_groups.append(
                all_gene_names[seeds[i]])
        self.adata.uns['gene_groups'] = geneID_groups
        return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
    """Wrapper for sklearn's t-SNE implementation.

    See sklearn for the t-SNE documentation. All arguments are the same
    with the exception that 'metric' is set to 'precomputed' by default,
    implying that this function expects a distance matrix by default.
    """
    if X is not None:
        # Embed the caller-supplied matrix and return the result directly.
        return man.TSNE(metric=metric, **kwargs).fit_transform(X)
    # Otherwise embed the SAM PCA coordinates and store the embedding.
    embedding = man.TSNE(
        metric=self.distance, **kwargs).fit_transform(
        self.adata.obsm['X_pca'])
    self.adata.obsm['X_tsne'] = embedding
def run_umap(self, X=None, metric=None, **kwargs):
    """Wrapper for umap-learn.

    See https://github.com/lmcinnes/umap sklearn for the documentation
    and source code.
    """
    import umap as umap
    if metric is None:
        metric = self.distance
    reducer = umap.UMAP(metric=metric, **kwargs)
    if X is not None:
        # Embed the caller-supplied matrix and return it directly.
        return reducer.fit_transform(X)
    # Otherwise embed the SAM PCA coordinates and store the embedding.
    self.adata.obsm['X_umap'] = reducer.fit_transform(
        self.adata.obsm['X_pca'])
def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
                  method='gauss', **kwargs):
    """
    Experimental -- running UMAP on the diffusion components

    Builds a scanpy neighbor graph on 'use_rep', computes a diffusion map,
    rebuilds neighbors in diffusion space, then runs UMAP. Any existing
    'X_umap' embedding is preserved under 'X_umap_sam'.

    NOTE(review): the 'metric' and '**kwargs' parameters are never used
    in the body (the first neighbors call uses self.distance and the
    second hard-codes 'euclidean') -- confirm whether that is intended.
    """
    # NOTE(review): 'scanpy.api' is the legacy import path; newer scanpy
    # versions expose the same functions as 'import scanpy as sc'.
    import scanpy.api as sc
    sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
                    metric=self.distance,method=method)
    sc.tl.diffmap(self.adata, n_comps=n_comps)
    # Re-derive the neighbor graph in diffusion-component space.
    sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
                    metric='euclidean',method=method)
    if 'X_umap' in self.adata.obsm.keys():
        # Keep the prior SAM UMAP before scanpy overwrites 'X_umap'.
        self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
    sc.tl.umap(self.adata,min_dist=0.1,copy=False)
def knn_avg(self, nnm=None):
    """Smooth the dispersion layer over the kNN graph.

    Averages each cell's 'X_disp' expression over its k nearest
    neighbors (rows of 'nnm' scaled by 1/k) and stores the result in
    adata.layers['X_knn_avg']. Defaults to the stored connectivity
    graph when 'nnm' is not given.
    """
    if nnm is None:
        nnm = self.adata.uns['neighbors']['connectivities']
    smoothed = (nnm / self.k).dot(self.adata.layers['X_disp'])
    self.adata.layers['X_knn_avg'] = smoothed
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
            edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
    """Display a scatter plot.

    Displays a scatter plot using the SAM projection or another input
    projection with or without annotations.

    Parameters
    ----------
    projection - ndarray of floats, optional, default None
        An N x 2 matrix, where N is the number of data points. If None,
        use an existing SAM projection (default t-SNE). Can take on values
        'umap' or 'tsne' to specify either the SAM UMAP embedding or
        SAM t-SNE embedding.

    c - ndarray or str, optional, default None
        Colors for each cell in the scatter plot. Can be a vector of
        floats or strings for cell annotations. Can also be a key
        for sam.adata.obs (i.e. 'louvain_clusters').

    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.

    cmap - string, optional, default 'rainbow'
        The colormap to use for the input color values.

    colorbar - bool, optional default True
        If True, display a colorbar indicating which values / annotations
        correspond to which color in the scatter plot.

    Keyword arguments -
        All other keyword arguments that can be passed into
        matplotlib.pyplot.scatter can be used.
    """
    if (not PLOTTING):
        print("matplotlib not installed!")
    else:
        # Resolve the 2D coordinates to plot.
        if(isinstance(projection, str)):
            try:
                dt = self.adata.obsm[projection]
            except KeyError:
                # NOTE(review): after this message 'dt' is unbound and the
                # code below raises NameError -- an early return here
                # would be cleaner; confirm intent.
                print('Please create a projection first using run_umap or'
                      'run_tsne')
        elif(projection is None):
            # Prefer UMAP, fall back to t-SNE.
            try:
                dt = self.adata.obsm['X_umap']
            except KeyError:
                try:
                    dt = self.adata.obsm['X_tsne']
                except KeyError:
                    print("Please create either a t-SNE or UMAP projection"
                          "first.")
                    return
        else:
            dt = projection

        if(axes is None):
            plt.figure()
            axes = plt.gca()

        if(c is None):
            # No coloring requested; plain scatter.
            plt.scatter(dt[:, 0], dt[:, 1], s=s,
                        linewidth=linewidth, edgecolor=edgecolor, **kwargs)
        else:
            # A string 'c' is treated as a key into adata.obs; if the key
            # is missing, 'c' is left as the raw string.
            if isinstance(c, str):
                try:
                    c = self.adata.obs[c].get_values()
                except KeyError:
                    0  # do nothing
            if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
               (isinstance(c, np.ndarray) or isinstance(c, list))):
                # Categorical string annotations: map to integer codes and
                # label the colorbar ticks with the original strings.
                i = ut.convert_annotations(c)
                ui, ai = np.unique(i, return_index=True)
                cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)
                if(colorbar):
                    cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                    cbar.ax.set_yticklabels(c[ai])
            else:
                # Numeric color vector (or a matplotlib color spec, in
                # which case the colorbar is suppressed).
                if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                    colorbar = False
                i = c

                cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                   linewidth=linewidth,
                                   edgecolor=edgecolor,
                                   **kwargs)

                if(colorbar):
                    plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
    """Display a gene's expressions.

    Displays a scatter plot using the SAM projection or another input
    projection with a particular gene's expressions overlaid.

    Parameters
    ----------
    gene - string
        a case-sensitive string indicating the gene expression pattern
        to display.

    avg - bool, optional, default True
        If True, the plots use the k-nearest-neighbor-averaged expression
        values to smooth out noisy expression patterns and improves
        visualization.

    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.

    **kwargs - all keyword arguments in 'SAM.scatter' are eligible.
    """
    all_gene_names = np.array(list(self.adata.var_names))
    cell_names = np.array(list(self.adata.obs_names))
    all_cell_names = np.array(list(self.adata_raw.obs_names))

    idx = np.where(all_gene_names == gene)[0]
    name = gene
    if(idx.size == 0):
        # Fixed typo in the user-facing message ("note" -> "not").
        print(
            "Gene not found in the filtered dataset. Note that genes "
            "are case sensitive.")
        return

    if(avg):
        # Use the kNN-smoothed layer; fall back to raw log2 counts if the
        # smoothed column is all zeros.
        a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
        if a.sum() == 0:
            a = np.log2(self.adata_raw.X[np.in1d(
                all_cell_names, cell_names), :][:,
                idx].toarray().flatten() + 1)
    else:
        # Raw log2(counts + 1), restricted to the filtered cells.
        a = np.log2(self.adata_raw.X[np.in1d(
            all_cell_names, cell_names), :][:,
            idx].toarray().flatten() + 1)

    if axes is None:
        plt.figure()
        axes = plt.gca()

    self.scatter(c=a, axes=axes, **kwargs)
    axes.set_title(name)
def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
    """Cluster cells with DBSCAN and assign noise points to the nearest
    cluster by a kNN majority vote.

    If X is None, clusters the stored UMAP embedding and saves the labels
    to adata.obs['density_clusters']; otherwise clusters the given matrix
    and returns the label vector.
    """
    from sklearn.cluster import DBSCAN
    if X is None:
        X = self.adata.obsm['X_umap']
        save = True
    else:
        save = False

    cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)
    # DBSCAN labels noise points as -1.
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        # Distances from clustered points to each noise point.
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # Count, per noise point, how many of its k neighbors fall in
        # each cluster, and assign the majority cluster.
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['density_clusters'] = pd.Categorical(cl)
    else:
        return cl
def louvain_clustering(self, X=None, res=1, method='modularity'):
    """Runs Louvain clustering using the vtraag implementation. Assumes
    that 'louvain' optional dependency is installed.

    Parameters
    ----------
    res - float, optional, default 1
        The resolution parameter which tunes the number of clusters Louvain
        finds.

    method - str, optional, default 'modularity'
        Can be 'modularity' or 'significance', which are two different
        optimizing functions in the Louvain algorithm.
    """
    if X is None:
        X = self.adata.uns['neighbors']['connectivities']
        save = True
    else:
        if not sp.isspmatrix_csr(X):
            X = sp.csr_matrix(X)
        save = False

    import igraph as ig
    import louvain

    # Shared-neighbor similarity (X X^T / k), sparsified back to k
    # neighbors per cell.
    adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
    sources, targets = adjacency.nonzero()
    weights = adjacency[sources, targets]
    if isinstance(weights, np.matrix):
        # Flatten the 1 x nnz matrix returned by sparse fancy indexing.
        weights = weights.A1
    g = ig.Graph(directed=True)
    g.add_vertices(adjacency.shape[0])
    g.add_edges(list(zip(sources, targets)))
    try:
        g.es['weight'] = weights
    except BaseException:
        # Fall back to an unweighted graph if weights cannot be set.
        pass

    if method == 'significance':
        cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
    else:
        cl = louvain.find_partition(
            g,
            louvain.RBConfigurationVertexPartition,
            resolution_parameter=res)

    if save:
        self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
    else:
        return np.array(cl.membership)
def leiden_clustering(self, X=None, res = 1):
    """Run scanpy's Leiden clustering on the stored neighbor graph (or on
    a caller-supplied adjacency matrix X) and store integer cluster
    labels in adata.obs['leiden_clusters'] (or 'leiden_clusters_X').

    NOTE(review): 'scanpy.api' and Series.get_values() are both
    deprecated in newer scanpy/pandas releases -- confirm pinned
    dependency versions.
    """
    import scanpy.api as sc
    if X is None:
        sc.tl.leiden(self.adata, resolution = res,
                     key_added='leiden_clusters')
        # Convert scanpy's string categories to integer categoricals.
        self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
            'leiden_clusters'].get_values().astype('int'))
    else:
        sc.tl.leiden(self.adata, resolution = res, adjacency = X,
                     key_added='leiden_clusters_X')
        self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
            'leiden_clusters_X'].get_values().astype('int'))
def hdbknn_clustering(self, X=None, k=None, **kwargs):
    """Cluster cells with HDBSCAN and assign noise points to the nearest
    cluster by a kNN majority vote.

    If X is None, clusters a normalized 15-PC projection of the processed
    expression data and saves the labels to adata.obs['hdbknn_clusters'];
    otherwise clusters the given matrix and returns the label vector.
    """
    import hdbscan
    if X is None:
        #X = self.adata.obsm['X_pca']
        # Project the processed data onto the first 15 cached PCs and
        # row-normalize before clustering.
        D = self.adata.uns['X_processed']
        X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
        X = Normalizer().fit_transform(X)
        save = True
    else:
        save = False

    if k is None:
        k = 20#self.k

    hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)

    cl = hdb.fit_predict(X)

    # HDBSCAN labels noise points as -1; reassign them below.
    idx0 = np.where(cl != -1)[0]
    idx1 = np.where(cl == -1)[0]
    if idx1.size > 0 and idx0.size > 0:
        xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
        knn = np.argsort(xcmap.T, axis=1)[:, :k]
        nnm = np.zeros(xcmap.shape).T
        nnm[np.tile(np.arange(knn.shape[0])[:, None],
                    (1, knn.shape[1])).flatten(),
            knn.flatten()] = 1
        # Majority vote among the k nearest clustered points.
        nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
        for i in range(cl.max() + 1):
            nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

        cl[idx1] = np.argmax(nnmc, axis=1)

    if save:
        self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
    else:
        return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
                             n_genes=4000):
    """
    Ranks marker genes for each cluster using a random forest
    classification approach.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    clusters - int or array-like, default None
        A number or vector corresponding to the specific cluster ID(s)
        for which marker genes will be calculated. If None, marker genes
        will be computed for all clusters.

    n_genes - int, optional, default 4000
        By default, trains the classifier on the top 4000 SAM-weighted
        genes.
    """
    if(labels is None):
        try:
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    from sklearn.ensemble import RandomForestClassifier

    markers = {}
    # Use 'is None': 'clusters == None' broadcasts elementwise when
    # 'clusters' is an ndarray and then raises on truth-testing.
    if clusters is None:
        lblsu = np.unique(lbls)
    else:
        lblsu = np.unique(clusters)

    # Train on the top n_genes SAM-weighted genes (columns ordered by
    # decreasing weight, matching adata.uns['ranked_genes']).
    indices = np.argsort(-self.adata.var['weights'].values)
    X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
    for K in range(lblsu.size):
        print(K)
        # One-vs-rest target for this cluster.
        y = np.zeros(lbls.size)
        y[lbls == lblsu[K]] = 1
        clf = RandomForestClassifier(n_estimators=100, max_depth=None,
                                     random_state=0)
        clf.fit(X, y)
        # Feature importances rank genes for this cluster.
        idx = np.argsort(-clf.feature_importances_)
        markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]

    if clusters is None:
        self.adata.uns['marker_genes_rf'] = markers

    return markers
def identify_marker_genes_ratio(self, labels=None):
    """
    Ranks marker genes for each cluster using a SAM-weighted
    expression-ratio approach (works quite well).

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.
    """
    if(labels is None):
        try:
            # Fall back to the first '*_clusters' column in adata.obs.
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    all_gene_names = np.array(list(self.adata.var_names))

    markers={}
    # Total expression of each gene over all cells.
    s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
    lblsu=np.unique(lbls)
    for i in lblsu:
        # Total expression of each gene within cluster i.
        d = np.array(self.adata.layers['X_disp']
                     [lbls == i, :].sum(0)).flatten()
        rat = np.zeros(d.size)
        # Score = (in-cluster sum)^2 / total sum, scaled by the SAM
        # gene weight; zero-expression genes score 0.
        rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
            self.adata.var['weights'].values[s > 0]
        x = np.argsort(-rat)
        markers[i] = all_gene_names[x[:]]

    self.adata.uns['marker_genes_ratio'] = markers

    return markers
def identify_marker_genes_corr(self, labels=None, n_genes=4000):
    """
    Ranking marker genes based on their respective magnitudes in the
    correlation dot products with cluster-specific reference expression
    profiles.

    Parameters
    ----------
    labels - numpy.array or str, optional, default None
        Cluster labels to use for marker gene identification. If None,
        assumes that one of SAM's clustering algorithms has been run. Can
        be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
        specify specific cluster labels in adata.obs.

    n_genes - int, optional, default 4000
        By default, computes correlations on the top 4000 SAM-weighted genes.
    """
    if(labels is None):
        try:
            # Fall back to the first '*_clusters' column in adata.obs.
            keys = np.array(list(self.adata.obs_keys()))
            lbls = self.adata.obs[ut.search_string(
                keys, '_clusters')[0][0]].get_values()
        except KeyError:
            print("Please generate cluster labels first or set the "
                  "'labels' keyword argument.")
            return
    elif isinstance(labels, str):
        lbls = self.adata.obs[labels].get_values().flatten()
    else:
        lbls = labels

    w=self.adata.var['weights'].values
    s = StandardScaler()
    # Standardize the top-weighted genes, then rescale by the weights.
    idxg = np.argsort(-w)[:n_genes]
    y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]

    all_gene_names = np.array(list(self.adata.var_names))[idxg]

    markers = {}
    lblsu=np.unique(lbls)
    for i in lblsu:
        Gcells = np.array(list(self.adata.obs_names[lbls==i]))
        # Per-cell z-scores within the cluster, correlated against the
        # cluster's standardized mean (reference) profile.
        z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
        m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]

        ref = z1.mean(0)
        ref = (ref-ref.mean())/ref.std()
        g2 = (m1*ref).mean(0)
        markers[i] = all_gene_names[np.argsort(-g2)]

    self.adata.uns['marker_genes_corr'] = markers
    return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.identify_marker_genes_rf | python | def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers | Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1404-L1471 | [
"def search_string(vec, s):\n m = []\n s = s.lower()\n for i in range(len(vec)):\n st = vec[i].lower()\n b = st.find(s)\n if(b != -1):\n m.append(i)\n if(len(m) > 0):\n return vec[np.array(m)], np.array(m)\n else:\n return [-1, -1]\n"
] | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
if isinstance(counts, tuple) or isinstance(counts, list):
raw_data, all_gene_names, all_cell_names = counts
if isinstance(raw_data, np.ndarray):
raw_data = sp.csr_matrix(raw_data)
self.adata_raw = AnnData(
X=raw_data, obs={
'obs_names': all_cell_names}, var={
'var_names': all_gene_names})
elif isinstance(counts, pd.DataFrame):
raw_data = sp.csr_matrix(counts.values)
all_gene_names = np.array(list(counts.columns.values))
all_cell_names = np.array(list(counts.index.values))
self.adata_raw = AnnData(
X=raw_data, obs={
'obs_names': all_cell_names}, var={
'var_names': all_gene_names})
elif isinstance(counts, AnnData):
all_cell_names=np.array(list(counts.obs_names))
all_gene_names=np.array(list(counts.var_names))
self.adata_raw = counts
elif counts is not None:
raise Exception(
"\'counts\' must be either a tuple/list of "
"(data,gene IDs,cell IDs) or a Pandas DataFrame of"
"cells x genes")
if(annotations is not None):
annotations = np.array(list(annotations))
if counts is not None:
self.adata_raw.obs['annotations'] = pd.Categorical(annotations)
if(counts is not None):
if(np.unique(all_gene_names).size != all_gene_names.size):
self.adata_raw.var_names_make_unique()
if(np.unique(all_cell_names).size != all_cell_names.size):
self.adata_raw.obs_names_make_unique()
self.adata = self.adata_raw.copy()
self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
include_genes=None, exclude_genes=None,
include_cells=None, exclude_cells=None,
norm='log', min_expression=1, thresh=0.01,
filter_genes=True):
"""Log-normalizes and filters the expression data.
Parameters
----------
div : float, optional, default 1
The factor by which the gene expression will be divided prior to
log normalization.
downsample : float, optional, default 0
The factor by which to randomly downsample the data. If 0, the
data will not be downsampled.
sum_norm : str or float, optional, default None
If a float, the total number of transcripts in each cell will be
normalized to this value prior to normalization and filtering.
Otherwise, nothing happens. If 'cell_median', each cell is
normalized to have the median total read count per cell. If
'gene_median', each gene is normalized to have the median total
read count per gene.
norm : str, optional, default 'log'
If 'log', log-normalizes the expression data. If 'ftt', applies the
Freeman-Tukey variance-stabilization transformation. If
'multinomial', applies the Pearson-residual transformation (this is
experimental and should only be used for raw, un-normalized UMI
datasets). If None, the data is not normalized.
include_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to keep.
All other genes will be filtered out. Gene names are case-
sensitive.
exclude_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to
exclude. These genes will be filtered out. Gene names are case-
sensitive.
include_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to keep.
All other cells will be filtered out. Cell names are
case-sensitive.
exclude_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to exclude.
Thses cells will be filtered out. Cell names are
case-sensitive.
min_expression : float, optional, default 1
The threshold above which a gene is considered
expressed. Gene expression values less than 'min_expression' are
set to zero.
thresh : float, optional, default 0.2
Keep genes expressed in greater than 'thresh'*100 % of cells and
less than (1-'thresh')*100 % of cells, where a gene is considered
expressed if its expression value exceeds 'min_expression'.
filter_genes : bool, optional, default True
Setting this to False turns off filtering operations aside from
removing genes with zero expression across all cells. Genes passed
in exclude_genes or not passed in include_genes will still be
filtered.
"""
# load data
try:
D= self.adata_raw.X
self.adata = self.adata_raw.copy()
except AttributeError:
print('No data loaded')
# filter cells
cell_names = np.array(list(self.adata_raw.obs_names))
idx_cells = np.arange(D.shape[0])
if(include_cells is not None):
include_cells = np.array(list(include_cells))
idx2 = np.where(np.in1d(cell_names, include_cells))[0]
idx_cells = np.array(list(set(idx2) & set(idx_cells)))
if(exclude_cells is not None):
exclude_cells = np.array(list(exclude_cells))
idx4 = np.where(np.in1d(cell_names, exclude_cells,
invert=True))[0]
idx_cells = np.array(list(set(idx4) & set(idx_cells)))
if downsample > 0:
numcells = int(D.shape[0] / downsample)
rand_ind = np.random.choice(np.arange(D.shape[0]),
size=numcells, replace=False)
idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
else:
numcells = D.shape[0]
mask_cells = np.zeros(D.shape[0], dtype='bool')
mask_cells[idx_cells] = True
self.adata = self.adata_raw[mask_cells,:].copy()
D = self.adata.X
if isinstance(D,np.ndarray):
D=sp.csr_matrix(D,dtype='float32')
else:
D=D.astype('float32')
D.sort_indices()
if(D.getformat() == 'csc'):
D=D.tocsr();
# sum-normalize
if (sum_norm == 'cell_median' and norm != 'multinomial'):
s = D.sum(1).A.flatten()
sum_norm = np.median(s)
D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
elif (sum_norm == 'gene_median' and norm != 'multinomial'):
s = D.sum(0).A.flatten()
sum_norm = np.median(s)
s[s==0]=1
D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
elif sum_norm is not None and norm != 'multinomial':
D = D.multiply(1 / D.sum(1).A.flatten()[:,
None] * sum_norm).tocsr()
# normalize
self.adata.X = D
if norm is None:
D.data[:] = (D.data / div)
elif(norm.lower() == 'log'):
D.data[:] = np.log2(D.data / div + 1)
elif(norm.lower() == 'ftt'):
D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
elif norm.lower() == 'multinomial':
ni = D.sum(1).A.flatten() #cells
pj = (D.sum(0) / D.sum()).A.flatten() #genes
col = D.indices
row=[]
for i in range(D.shape[0]):
row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
row = np.concatenate(row).astype('int32')
mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
mu2 = mu.copy()
mu2.data[:]=mu2.data**2
mu2 = mu2.multiply(1/ni[:,None])
mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
self.adata.X = mu
if sum_norm is None:
sum_norm = np.median(ni)
D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
D.data[:] = np.log2(D.data / div + 1)
else:
D.data[:] = (D.data / div)
# zero-out low-expressed genes
idx = np.where(D.data <= min_expression)[0]
D.data[idx] = 0
# filter genes
gene_names = np.array(list(self.adata.var_names))
idx_genes = np.arange(D.shape[1])
if(include_genes is not None):
include_genes = np.array(list(include_genes))
idx = np.where(np.in1d(gene_names, include_genes))[0]
idx_genes = np.array(list(set(idx) & set(idx_genes)))
if(exclude_genes is not None):
exclude_genes = np.array(list(exclude_genes))
idx3 = np.where(np.in1d(gene_names, exclude_genes,
invert=True))[0]
idx_genes = np.array(list(set(idx3) & set(idx_genes)))
if(filter_genes):
a, ct = np.unique(D.indices, return_counts=True)
c = np.zeros(D.shape[1])
c[a] = ct
keep = np.where(np.logical_and(c / D.shape[0] > thresh,
c / D.shape[0] <= 1 - thresh))[0]
idx_genes = np.array(list(set(keep) & set(idx_genes)))
mask_genes = np.zeros(D.shape[1], dtype='bool')
mask_genes[idx_genes] = True
self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
self.adata.X.eliminate_zeros()
self.adata.var['mask_genes']=mask_genes
if norm == 'multinomial':
self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
self.adata.layers['X_disp'].eliminate_zeros()
else:
self.adata.layers['X_disp'] = self.adata.X
    def load_data(self, filename, transpose=True,
                  save_sparse_file='h5ad', sep=',', **kwargs):
        """Loads the specified data file. The file can be a table of
        read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
        as columns by default. The file can also be a pickle file (output from
        'save_sparse_data') or an h5ad file (output from 'save_anndata').
        This function that loads the file specified by 'filename'.

        Parameters
        ----------
        filename - string
            The path to the tabular raw expression counts file.
        sep - string, optional, default ','
            The delimiter used to read the input data table. By default
            assumes the input table is delimited by commas.
        save_sparse_file - str, optional, default 'h5ad'
            If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
            (the native AnnData file format) to the same folder as the original
            data for faster loading in the future. If 'p', pickles the sparse
            data structure, cell names, and gene names in the same folder as
            the original data for faster loading in the future.
        transpose - bool, optional, default True
            By default, assumes file is (genes x cells). Set this to False if
            the file has dimensions (cells x genes).
        """
        # pickled-tuple input (produced by save_sparse_data)
        if filename.split('.')[-1] == 'p':
            raw_data, all_cell_names, all_gene_names = (
                pickle.load(open(filename, 'rb')))
            if(transpose):
                raw_data = raw_data.T
                if raw_data.getformat()=='csc':
                    print("Converting sparse matrix to csr format...")
                    raw_data=raw_data.tocsr()
            # already a fast-loading format; don't re-save
            save_sparse_file = None
        elif filename.split('.')[-1] != 'h5ad':
            # plain text table (csv/txt); genes x cells unless transpose=False
            df = pd.read_csv(filename, sep=sep, index_col=0)
            if(transpose):
                dataset = df.T
            else:
                dataset = df
            raw_data = sp.csr_matrix(dataset.values)
            all_cell_names = np.array(list(dataset.index.values))
            all_gene_names = np.array(list(dataset.columns.values))
        if filename.split('.')[-1] != 'h5ad':
            self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                     var={'var_names': all_gene_names})
            # deduplicate names so AnnData indexing stays unique
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()
            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = raw_data
        else:
            # native AnnData file: load directly
            self.adata_raw = anndata.read_h5ad(filename, **kwargs)
            self.adata = self.adata_raw.copy()
            if 'X_disp' not in list(self.adata.layers.keys()):
                self.adata.layers['X_disp'] = self.adata.X
            save_sparse_file = None
        # optionally cache a fast-loading copy next to the original file
        if(save_sparse_file == 'p'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_sparse_data(path + new_sparse_file + '_sparse.p')
        elif(save_sparse_file == 'h5ad'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
    def load_annotations(self, aname, sep=','):
        """Loads cell annotations.

        Loads the cell annotations specified by the 'aname' path.

        Parameters
        ----------
        aname - string
            The path to the annotations file. First column should be cell IDs
            and second column should be the desired annotations.
        """
        ann = pd.read_csv(aname)
        cell_names = np.array(list(self.adata.obs_names))
        all_cell_names = np.array(list(self.adata_raw.obs_names))
        # Try several parse configurations until the row count matches the
        # number of cells: with/without an index column and header row.
        if(ann.shape[1] > 1):
            ann = pd.read_csv(aname, index_col=0, sep=sep)
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
        else:
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, header=None, sep=sep)
        # normalize index to strings so lookups by cell name work
        ann.index = np.array(list(ann.index.astype('<U100')))
        # ann1: annotations aligned to the filtered cells; ann2: all cells
        ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
        ann2 = np.array(list(ann.values.flatten()))
        self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
        self.adata.obs['annotations'] = pd.Categorical(ann1)
    def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
        """Computes the spatial dispersion factors for each gene.

        Parameters
        ----------
        nnm - scipy.sparse, float
            Square cell-to-cell nearest-neighbor matrix.
        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This ensures
            that outlier genes do not significantly skew the weight
            distribution.

        Returns:
        -------
        weights - ndarray, float
            The vector of gene weights.
        """
        # smooth expression over each cell's neighbors, then score genes by
        # their variance-to-mean ratio (Fano factor) of the smoothed values
        self.knn_avg(nnm)
        D_avg = self.adata.layers['X_knn_avg']
        mu, var = sf.mean_variance_axis(D_avg, axis=0)
        dispersions = np.zeros(var.size)
        dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
        self.adata.var['spatial_dispersions'] = dispersions.copy()
        # cap dispersions at the mean of the top num_norm_avg values so a few
        # outlier genes cannot dominate the weight normalization
        ma = np.sort(dispersions)[-num_norm_avg:].mean()
        dispersions[dispersions >= ma] = ma
        weights = ((dispersions / dispersions.max())**0.5).flatten()
        self.adata.var['weights'] = weights
        return weights
    def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
        """Computes the contribution of the gene IDs in 'genes' to each
        principal component (PC) of the filtered expression data as the mean of
        the absolute value of the corresponding gene loadings. High values
        correspond to PCs that are highly correlated with the features in
        'genes'. These PCs can then be regressed out of the data using
        'regress_genes'.

        Parameters
        ----------
        genes - numpy.array or list
            Genes for which contribution to each PC will be calculated.
        npcs - int, optional, default None
            How many PCs to calculate when computing PCA of the filtered and
            log-transformed expression data. If None, calculate all PCs.
        plot - bool, optional, default False
            If True, plot the scores reflecting how correlated each PC is with
            genes of interest. Otherwise, do not plot anything.

        Returns:
        -------
        x - numpy.array
            Scores reflecting how correlated each PC is with the genes of
            interest (ordered by decreasing eigenvalues). Returns None if
            'genes' is None (the PCA results are still stored on the object).
        """
        from sklearn.decomposition import PCA
        if npcs is None:
            # compute as many components as there are cells
            npcs = self.adata.X.shape[0]
        pca = PCA(n_components=npcs)
        pc = pca.fit_transform(self.adata.X.toarray())
        # stash the fitted PCA and scores for regress_genes to use later
        self.regression_pca = pca
        self.regression_pcs = pc
        gene_names = np.array(list(self.adata.var_names))
        if(genes is not None):
            idx = np.where(np.in1d(gene_names, genes))[0]
            sx = pca.components_[:, idx]
            # score each PC by the mean absolute loading of the target genes
            x = np.abs(sx).mean(1)
            if plot:
                plt.figure()
                plt.plot(x)
            return x
        else:
            return
def regress_genes(self, PCs):
"""Regress out the principal components in 'PCs' from the filtered
expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
been previously called.
Parameters
----------
PCs - int, numpy.array, list
The principal components to regress out of the expression data.
"""
ind = [PCs]
ind = np.array(ind).flatten()
try:
y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
self.regression_pca.components_[ind, :] * self.adata.var[
'weights'].values)
except BaseException:
y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
self.regression_pca.components_[ind, :])
self.adata.X = sp.csr_matrix(y)
    def run(self,
            max_iter=10,
            verbose=True,
            projection='umap',
            stopping_condition=5e-3,
            num_norm_avg=50,
            k=20,
            distance='correlation',
            preprocessing='Normalizer',
            proj_kwargs={}):
        """Runs the Self-Assembling Manifold algorithm.

        Parameters
        ----------
        k - int, optional, default 20
            The number of nearest neighbors to identify for each cell.
        distance : string, optional, default 'correlation'
            The distance metric to use when constructing cell distance
            matrices. Can be any of the distance metrics supported by
            sklearn's 'pdist'.
        max_iter - int, optional, default 10
            The maximum number of iterations SAM will run.
        stopping_condition - float, optional, default 5e-3
            The stopping condition threshold for the RMSE between gene weights
            in adjacent iterations.
        verbose - bool, optional, default True
            If True, the iteration number and error between gene weights in
            adjacent iterations will be displayed.
        projection - str, optional, default 'umap'
            If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
            embedding. Otherwise, no embedding will be generated.
        preprocessing - str, optional, default 'Normalizer'
            If 'Normalizer', use sklearn.preprocessing.Normalizer, which
            normalizes expression data prior to PCA such that each cell has
            unit L2 norm. If 'StandardScaler', use
            sklearn.preprocessing.StandardScaler, which normalizes expression
            data prior to PCA such that each gene has zero mean and unit
            variance. Otherwise, do not normalize the expression data. We
            recommend using 'StandardScaler' for large datasets and
            'Normalizer' otherwise.
        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This prevents
            genes with large spatial dispersions from skewing the distribution
            of weights.
        proj_kwargs - dict, optional, default {}
            A dictionary of keyword arguments to pass to the projection
            functions.
        """
        self.distance = distance
        D = self.adata.X
        self.k = k
        # clamp k to [5, 100] and keep it below the number of cells
        if(self.k < 5):
            self.k = 5
        elif(self.k > 100):
            self.k = 100
        if(self.k > D.shape[0] - 1):
            self.k = D.shape[0] - 2
        numcells = D.shape[0]
        # heuristics: fewer genes / PCs for larger datasets
        n_genes = 8000
        if numcells > 3000 and n_genes > 3000:
            n_genes = 3000
        elif numcells > 2000 and n_genes > 4500:
            n_genes = 4500
        elif numcells > 1000 and n_genes > 6000:
            n_genes = 6000
        elif n_genes > 8000:
            n_genes = 8000
        npcs = None
        if npcs is None and numcells > 3000:
            npcs = 150
        elif npcs is None and numcells > 2000:
            npcs = 250
        elif npcs is None and numcells > 1000:
            npcs = 350
        elif npcs is None:
            npcs = 500
        tinit = time.time()
        # seed the iteration with a random k-NN graph (self + k-1 random
        # neighbors per cell) to get an initial set of gene weights
        edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        nums = np.arange(edm.shape[1])
        RINDS = np.random.randint(
            0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                           (self.k - 1)))
        RINDS = np.hstack((nums[:, None], RINDS))
        edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                    (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
        edm = edm.tocsr()
        print('RUNNING SAM')
        W = self.dispersion_ranking_NN(
            edm, num_norm_avg=1)
        old = np.zeros(W.size)
        new = W
        i = 0
        err = ((new - old)**2).mean()**0.5
        if max_iter < 5:
            max_iter = 5
        nnas = num_norm_avg
        # keep the history of graphs and weights from each iteration
        self.Ns=[edm]
        self.Ws = [W]
        # iterate: weights -> weighted PCA -> k-NN graph -> new weights,
        # until the RMSE between successive weight vectors converges
        while (i < max_iter and err > stopping_condition):
            conv = err
            if(verbose):
                print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))
            i += 1
            old = new
            W, wPCA_data, EDM, = self.calculate_nnm(
                D, W, n_genes, preprocessing, npcs, numcells, nnas)
            new = W
            err = ((new - old)**2).mean()**0.5
            self.Ns.append(EDM)
            self.Ws.append(W)
        # one final pass with the converged weights
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        self.Ns.append(EDM)
        all_gene_names = np.array(list(self.adata.var_names))
        indices = np.argsort(-W)
        ranked_genes = all_gene_names[indices]
        self.corr_bin_genes(number_of_features=1000)
        self.adata.uns['ranked_genes'] = ranked_genes
        self.adata.obsm['X_pca'] = wPCA_data
        self.adata.uns['neighbors'] = {}
        self.adata.uns['neighbors']['connectivities'] = EDM
        if(projection == 'tsne'):
            print('Computing the t-SNE embedding...')
            self.run_tsne(**proj_kwargs)
        elif(projection == 'umap'):
            print('Computing the UMAP embedding...')
            self.run_umap(**proj_kwargs)
        elif(projection == 'diff_umap'):
            print('Computing the diffusion UMAP embedding...')
            self.run_diff_umap(**proj_kwargs)
        elapsed = time.time() - tinit
        if verbose:
            print('Elapsed time: ' + str(elapsed) + ' seconds')
    def calculate_nnm(
            self,
            D,
            W,
            n_genes,
            preprocessing,
            npcs,
            numcells,
            num_norm_avg):
        """One SAM iteration: weight genes, run weighted PCA, rebuild the
        k-NN graph, and recompute gene weights from the new graph.

        Returns (weights, weighted PCA coordinates, nearest-neighbor matrix).
        """
        # keep only the n_genes genes with the largest current weights
        if(n_genes is None):
            gkeep = np.arange(W.size)
        else:
            gkeep = np.sort(np.argsort(-W)[:n_genes])
        if preprocessing == 'Normalizer':
            Ds = D[:, gkeep].toarray()
            Ds = Normalizer().fit_transform(Ds)
        elif preprocessing == 'StandardScaler':
            Ds = D[:, gkeep].toarray()
            Ds = StandardScaler(with_mean=True).fit_transform(Ds)
            # clip extreme z-scores
            Ds[Ds > 5] = 5
            Ds[Ds < -5] = -5
        else:
            Ds = D[:, gkeep].toarray()
        # scale each gene by its current weight before PCA
        D_sub = Ds * (W[gkeep])
        if numcells > 500:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='auto')
        else:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='full')
        if self.distance == 'euclidean':
            # unit-normalize so euclidean distances behave like cosine
            g_weighted = Normalizer().fit_transform(g_weighted)
        self.adata.uns['pca_obj'] = pca
        EDM = self.calc_nnm(g_weighted)
        W = self.dispersion_ranking_NN(
            EDM, num_norm_avg=num_norm_avg)
        self.adata.uns['X_processed'] = D_sub
        return W, g_weighted, EDM
    def calc_nnm(self,g_weighted):
        """Build a sparse binary k-nearest-neighbor adjacency matrix from the
        weighted PCA coordinates, choosing an approximate or exact strategy
        based on dataset size."""
        numcells=g_weighted.shape[0]
        if g_weighted.shape[0] > 8000:
            # large dataset: approximate nearest neighbors
            nnm, dists = ut.nearest_neighbors(
                g_weighted, n_neighbors=self.k, metric=self.distance)
            EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
            EDM[np.tile(np.arange(nnm.shape[0])[:, None],
                        (1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
            EDM = EDM.tocsr()
        else:
            # small dataset: exact pairwise distances
            dist = ut.compute_distances(g_weighted, self.distance)
            nnm = ut.dist_to_nn(dist, self.k)
            EDM = sp.csr_matrix(nnm)
        return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
"""Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
geneID_groups = self.adata.uns['gene_groups']
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs)
def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:]
    def corr_bin_genes(self, number_of_features=None, input_gene=None):
        """A (hacky) method for binning groups of genes correlated along the
        SAM manifold.

        Greedily assigns each of the top-dispersion genes to the existing bin
        whose seed gene it correlates with most strongly (based on the
        smoothed expression matrix), or starts a new bin.

        Parameters
        ----------
        number_of_features - int, optional, default None
            The number of genes to bin. Capped at 1000 due to memory
            considerations.
        input_gene - str, optional, default None
            If not None, use this gene as the first seed when growing the
            correlation bins; in that case only the group containing it is
            returned. Otherwise all groups are stored in
            adata.uns['gene_groups'] and returned.
        """
        weights = self.adata.var['spatial_dispersions'].values
        all_gene_names = np.array(list(self.adata.var_names))
        D_avg = self.adata.layers['X_knn_avg']
        # genes sorted by decreasing dispersion; zero-dispersion genes dropped
        idx2 = np.argsort(-weights)[:weights[weights > 0].size]
        if(number_of_features is None or number_of_features > idx2.size):
            number_of_features = idx2.size
        if number_of_features > 1000:
            number_of_features = 1000
        if(input_gene is not None):
            input_gene = np.where(all_gene_names == input_gene)[0]
            if(input_gene.size == 0):
                print(
                    "Gene note found in the filtered dataset. Note "
                    "that genes are case sensitive.")
                return
            # seed the first bin with the query gene
            seeds = [np.array([input_gene])]
            pw_corr = np.corrcoef(
                D_avg[:, idx2[:number_of_features]].T.toarray())
            for i in range(1, number_of_features):
                flag = False
                # threshold: mean positive correlation of gene i
                maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
                maxi = 0
                # assign gene i to the seed it correlates with most strongly
                for j in range(len(seeds)):
                    if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                       > maxd):
                        maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                        maxi = j
                        flag = True
                if(not flag):
                    # no seed correlates strongly enough; start a new bin
                    seeds.append(np.array([idx2[i]]))
                else:
                    seeds[maxi] = np.append(seeds[maxi], idx2[i])
            geneID_groups = []
            for i in range(len(seeds)):
                geneID_groups.append(all_gene_names[seeds[i]])
            # only the group seeded by the query gene is returned
            return geneID_groups[0]
        else:
            # no query gene: seed with the highest-dispersion gene
            seeds = [np.array([idx2[0]])]
            pw_corr = np.corrcoef(
                D_avg[:, idx2[:number_of_features]].T.toarray())
            for i in range(1, number_of_features):
                flag = False
                maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
                maxi = 0
                for j in range(len(seeds)):
                    if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                       > maxd):
                        maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                        maxi = j
                        flag = True
                if(not flag):
                    seeds.append(np.array([idx2[i]]))
                else:
                    seeds[maxi] = np.append(seeds[maxi], idx2[i])
            geneID_groups = []
            for i in range(len(seeds)):
                geneID_groups.append(
                    all_gene_names[seeds[i]])
            self.adata.uns['gene_groups'] = geneID_groups
            return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
"""Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' is set to 'precomputed' by default,
implying that this function expects a distance matrix by default.
"""
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d
def run_umap(self, X=None, metric=None, **kwargs):
"""Wrapper for umap-learn.
See https://github.com/lmcinnes/umap sklearn for the documentation
and source code.
"""
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d
def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
method='gauss', **kwargs):
"""
Experimental -- running UMAP on the diffusion components
"""
import scanpy.api as sc
sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
metric=self.distance,method=method)
sc.tl.diffmap(self.adata, n_comps=n_comps)
sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
metric='euclidean',method=method)
if 'X_umap' in self.adata.obsm.keys():
self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
sc.tl.umap(self.adata,min_dist=0.1,copy=False)
def knn_avg(self, nnm=None):
if (nnm is None):
nnm = self.adata.uns['neighbors']['connectivities']
D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])
self.adata.layers['X_knn_avg'] = D_avg
    def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
                edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
        """Display a scatter plot.

        Displays a scatter plot using the SAM projection or another input
        projection with or without annotations.

        Parameters
        ----------
        projection - ndarray of floats, optional, default None
            An N x 2 matrix, where N is the number of data points. If None,
            use an existing SAM projection (default t-SNE). Can take on values
            'umap' or 'tsne' to specify either the SAM UMAP embedding or
            SAM t-SNE embedding.
        c - ndarray or str, optional, default None
            Colors for each cell in the scatter plot. Can be a vector of
            floats or strings for cell annotations. Can also be a key
            for sam.adata.obs (i.e. 'louvain_clusters').
        axes - matplotlib axis, optional, default None
            Plot output to the specified, existing axes. If None, create new
            figure window.
        cmap - string, optional, default 'rainbow'
            The colormap to use for the input color values.
        colorbar - bool, optional default True
            If True, display a colorbar indicating which values / annotations
            correspond to which color in the scatter plot.

        Keyword arguments -
            All other keyword arguments that can be passed into
            matplotlib.pyplot.scatter can be used.
        """
        if (not PLOTTING):
            print("matplotlib not installed!")
        else:
            # resolve the 2D coordinates to plot
            if(isinstance(projection, str)):
                # string: key into adata.obsm (e.g. 'X_umap', 'X_tsne')
                try:
                    dt = self.adata.obsm[projection]
                except KeyError:
                    print('Please create a projection first using run_umap or'
                          'run_tsne')
            elif(projection is None):
                # no projection given: prefer UMAP, fall back to t-SNE
                try:
                    dt = self.adata.obsm['X_umap']
                except KeyError:
                    try:
                        dt = self.adata.obsm['X_tsne']
                    except KeyError:
                        print("Please create either a t-SNE or UMAP projection"
                              "first.")
                        return
            else:
                dt = projection
            if(axes is None):
                plt.figure()
                axes = plt.gca()
            if(c is None):
                plt.scatter(dt[:, 0], dt[:, 1], s=s,
                            linewidth=linewidth, edgecolor=edgecolor, **kwargs)
            else:
                if isinstance(c, str):
                    # try to interpret the string as an adata.obs column
                    try:
                        c = self.adata.obs[c].get_values()
                    except KeyError:
                        0  # do nothing
                if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
                   (isinstance(c, np.ndarray) or isinstance(c, list))):
                    # string annotations: map to integer codes for coloring
                    i = ut.convert_annotations(c)
                    ui, ai = np.unique(i, return_index=True)
                    cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)
                    if(colorbar):
                        # label colorbar ticks with the original annotations
                        cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                        cbar.ax.set_yticklabels(c[ai])
                else:
                    # numeric colors (or a scalar, which gets no colorbar)
                    if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                        colorbar = False
                    i = c
                    cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)
                    if(colorbar):
                        plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name)
    def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
        """Cluster cells with DBSCAN, then assign each noise point to the
        cluster most represented among its k nearest non-noise neighbors.

        If X is None, clusters the UMAP embedding and stores the labels in
        adata.obs['density_clusters']; otherwise returns the labels.
        """
        from sklearn.cluster import DBSCAN
        if X is None:
            X = self.adata.obsm['X_umap']
            save = True
        else:
            save = False
        cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)
        # idx0: clustered points; idx1: DBSCAN noise points (label -1)
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            # vote each noise point into the cluster of its k nearest
            # clustered neighbors
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
            cl[idx1] = np.argmax(nnmc, axis=1)
        if save:
            self.adata.obs['density_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def louvain_clustering(self, X=None, res=1, method='modularity'):
        """Runs Louvain clustering using the vtraag implementation. Assumes
        that 'louvain' optional dependency is installed.

        Parameters
        ----------
        res - float, optional, default 1
            The resolution parameter which tunes the number of clusters Louvain
            finds.
        method - str, optional, default 'modularity'
            Can be 'modularity' or 'significance', which are two different
            optimizing funcitons in the Louvain algorithm.
        """
        if X is None:
            X = self.adata.uns['neighbors']['connectivities']
            save = True
        else:
            if not sp.isspmatrix_csr(X):
                X = sp.csr_matrix(X)
            save = False
        import igraph as ig
        import louvain
        # shared-neighbor weighting, re-sparsified to k edges per node
        adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
        sources, targets = adjacency.nonzero()
        weights = adjacency[sources, targets]
        if isinstance(weights, np.matrix):
            weights = weights.A1
        g = ig.Graph(directed=True)
        g.add_vertices(adjacency.shape[0])
        g.add_edges(list(zip(sources, targets)))
        try:
            g.es['weight'] = weights
        except BaseException:
            # fall back to an unweighted graph if edge weights can't be set
            pass
        if method == 'significance':
            cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
        else:
            cl = louvain.find_partition(
                g,
                louvain.RBConfigurationVertexPartition,
                resolution_parameter=res)
        if save:
            self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
        else:
            return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
"""Performs k-means clustering.
Parameters
----------
numc - int
Number of clusters
npcs - int, optional, default 15
Number of principal components to use as inpute for k-means
clustering.
"""
from sklearn.cluster import KMeans
if X is None:
D_sub = self.adata.uns['X_processed']
X = (
D_sub -
D_sub.mean(0)).dot(
self.adata.uns['pca_obj'].components_[
:npcs,
:].T)
save = True
else:
save = False
cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
if save:
self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
else:
return cl
def leiden_clustering(self, X=None, res = 1):
import scanpy.api as sc
if X is None:
sc.tl.leiden(self.adata, resolution = res,
key_added='leiden_clusters')
self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
'leiden_clusters'].get_values().astype('int'))
else:
sc.tl.leiden(self.adata, resolution = res, adjacency = X,
key_added='leiden_clusters_X')
self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
'leiden_clusters_X'].get_values().astype('int'))
    def hdbknn_clustering(self, X=None, k=None, **kwargs):
        """Performs HDBSCAN density-based clustering, then assigns each
        noise point (label -1) to the majority cluster among its k nearest
        non-noise neighbors.

        Parameters
        ----------
        X - array, optional, default None
            Data to cluster. If None, the SAM-processed expression data is
            projected onto the top 15 principal components, L2-normalized,
            and the labels are written to adata.obs['hdbknn_clusters'];
            otherwise the labels are returned.
        k - int, optional, default None
            Number of nearest neighbors used when reassigning noise points.
            Defaults to 20 when not provided.
        **kwargs -
            Forwarded to hdbscan.HDBSCAN.
        """
        import hdbscan
        if X is None:
            D = self.adata.uns['X_processed']
            # project onto the top 15 PCs and L2-normalize each cell
            X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
            X = Normalizer().fit_transform(X)
            save = True
        else:
            save = False
        if k is None:
            k = 20  # default neighborhood size for noise reassignment
        hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
        cl = hdb.fit_predict(X)
        # idx0: clustered cells; idx1: noise cells flagged -1 by HDBSCAN
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            # distances from clustered cells to noise cells
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            # k nearest clustered neighbors of each noise cell
            knn = np.argsort(xcmap.T, axis=1)[:, :k]
            # binary (noise x clustered) neighbor indicator matrix
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            # vote counts per cluster; assign each noise cell to the winner
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
            cl[idx1] = np.argmax(nnmc, axis=1)
        if save:
            self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
        else:
            return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
"""
Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers
def identify_marker_genes_ratio(self, labels=None):
"""
Ranks marker genes for each cluster using a SAM-weighted
expression-ratio approach (works quite well).
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
all_gene_names = np.array(list(self.adata.var_names))
markers={}
s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
lblsu=np.unique(lbls)
for i in lblsu:
d = np.array(self.adata.layers['X_disp']
[lbls == i, :].sum(0)).flatten()
rat = np.zeros(d.size)
rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
self.adata.var['weights'].values[s > 0]
x = np.argsort(-rat)
markers[i] = all_gene_names[x[:]]
self.adata.uns['marker_genes_ratio'] = markers
return markers
def identify_marker_genes_corr(self, labels=None, n_genes=4000):
"""
Ranking marker genes based on their respective magnitudes in the
correlation dot products with cluster-specific reference expression
profiles.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
n_genes - int, optional, default 4000
By default, computes correlations on the top 4000 SAM-weighted genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
w=self.adata.var['weights'].values
s = StandardScaler()
idxg = np.argsort(-w)[:n_genes]
y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
all_gene_names = np.array(list(self.adata.var_names))[idxg]
markers = {}
lblsu=np.unique(lbls)
for i in lblsu:
Gcells = np.array(list(self.adata.obs_names[lbls==i]))
z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
ref = z1.mean(0)
ref = (ref-ref.mean())/ref.std()
g2 = (m1*ref).mean(0)
markers[i] = all_gene_names[np.argsort(-g2)]
self.adata.uns['marker_genes_corr'] = markers
return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.identify_marker_genes_ratio | python | def identify_marker_genes_ratio(self, labels=None):
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
all_gene_names = np.array(list(self.adata.var_names))
markers={}
s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
lblsu=np.unique(lbls)
for i in lblsu:
d = np.array(self.adata.layers['X_disp']
[lbls == i, :].sum(0)).flatten()
rat = np.zeros(d.size)
rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
self.adata.var['weights'].values[s > 0]
x = np.argsort(-rat)
markers[i] = all_gene_names[x[:]]
self.adata.uns['marker_genes_ratio'] = markers
return markers | Ranks marker genes for each cluster using a SAM-weighted
expression-ratio approach (works quite well).
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1473-L1519 | [
"def search_string(vec, s):\n m = []\n s = s.lower()\n for i in range(len(vec)):\n st = vec[i].lower()\n b = st.find(s)\n if(b != -1):\n m.append(i)\n if(len(m) > 0):\n return vec[np.array(m)], np.array(m)\n else:\n return [-1, -1]\n"
] | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
    def __init__(self, counts=None, annotations=None):
        """Build the raw and working AnnData objects from 'counts'.

        'counts' may be a (matrix, gene IDs, cell IDs) tuple/list, a
        (cells x genes) pandas.DataFrame, an AnnData object, or None (data
        loaded later via a load function). 'annotations' is an optional
        per-cell annotation vector stored in adata_raw.obs['annotations'].
        """
        if isinstance(counts, tuple) or isinstance(counts, list):
            # (data, gene IDs, cell IDs) triple; densify -> CSR if needed
            raw_data, all_gene_names, all_cell_names = counts
            if isinstance(raw_data, np.ndarray):
                raw_data = sp.csr_matrix(raw_data)
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        elif isinstance(counts, pd.DataFrame):
            # DataFrame is assumed (cells x genes)
            raw_data = sp.csr_matrix(counts.values)
            all_gene_names = np.array(list(counts.columns.values))
            all_cell_names = np.array(list(counts.index.values))
            self.adata_raw = AnnData(
                X=raw_data, obs={
                    'obs_names': all_cell_names}, var={
                    'var_names': all_gene_names})
        elif isinstance(counts, AnnData):
            # adopt the AnnData object as-is (no copy)
            all_cell_names=np.array(list(counts.obs_names))
            all_gene_names=np.array(list(counts.var_names))
            self.adata_raw = counts
        elif counts is not None:
            raise Exception(
                "\'counts\' must be either a tuple/list of "
                "(data,gene IDs,cell IDs) or a Pandas DataFrame of"
                "cells x genes")

        if(annotations is not None):
            annotations = np.array(list(annotations))
            if counts is not None:
                self.adata_raw.obs['annotations'] = pd.Categorical(annotations)

        if(counts is not None):
            # deduplicate names only when duplicates actually exist
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()

            self.adata = self.adata_raw.copy()
            # 'X_disp' holds the expression values used for display/ranking
            self.adata.layers['X_disp'] = self.adata.X
    def preprocess_data(self, div=1, downsample=0, sum_norm=None,
                        include_genes=None, exclude_genes=None,
                        include_cells=None, exclude_cells=None,
                        norm='log', min_expression=1, thresh=0.01,
                        filter_genes=True):
        """Log-normalizes and filters the expression data.

        Parameters
        ----------
        div : float, optional, default 1
            The factor by which the gene expression will be divided prior to
            log normalization.

        downsample : float, optional, default 0
            The factor by which to randomly downsample the data. If 0, the
            data will not be downsampled.

        sum_norm : str or float, optional, default None
            If a float, the total number of transcripts in each cell will be
            normalized to this value prior to normalization and filtering.
            Otherwise, nothing happens. If 'cell_median', each cell is
            normalized to have the median total read count per cell. If
            'gene_median', each gene is normalized to have the median total
            read count per gene.

        norm : str, optional, default 'log'
            If 'log', log-normalizes the expression data. If 'ftt', applies the
            Freeman-Tukey variance-stabilization transformation. If
            'multinomial', applies the Pearson-residual transformation (this is
            experimental and should only be used for raw, un-normalized UMI
            datasets). If None, the data is not normalized.

        include_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to keep.
            All other genes will be filtered out. Gene names are case-
            sensitive.

        exclude_genes : array-like of string, optional, default None
            A vector of gene names or indices that specifies the genes to
            exclude. These genes will be filtered out. Gene names are case-
            sensitive.

        include_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to keep.
            All other cells will be filtered out. Cell names are
            case-sensitive.

        exclude_cells : array-like of string, optional, default None
            A vector of cell names that specifies the cells to exclude.
            These cells will be filtered out. Cell names are
            case-sensitive.

        min_expression : float, optional, default 1
            The threshold above which a gene is considered
            expressed. Gene expression values less than 'min_expression' are
            set to zero.

        thresh : float, optional, default 0.01
            Keep genes expressed in greater than 'thresh'*100 % of cells and
            less than (1-'thresh')*100 % of cells, where a gene is considered
            expressed if its expression value exceeds 'min_expression'.

        filter_genes : bool, optional, default True
            Setting this to False turns off filtering operations aside from
            removing genes with zero expression across all cells. Genes passed
            in exclude_genes or not passed in include_genes will still be
            filtered.
        """
        # load data
        try:
            D= self.adata_raw.X
            self.adata = self.adata_raw.copy()
        except AttributeError:
            print('No data loaded')

        # filter cells: intersect the include/exclude selections
        cell_names = np.array(list(self.adata_raw.obs_names))
        idx_cells = np.arange(D.shape[0])
        if(include_cells is not None):
            include_cells = np.array(list(include_cells))
            idx2 = np.where(np.in1d(cell_names, include_cells))[0]
            idx_cells = np.array(list(set(idx2) & set(idx_cells)))
        if(exclude_cells is not None):
            exclude_cells = np.array(list(exclude_cells))
            idx4 = np.where(np.in1d(cell_names, exclude_cells,
                                    invert=True))[0]
            idx_cells = np.array(list(set(idx4) & set(idx_cells)))

        if downsample > 0:
            # keep a random 1/downsample fraction of the cells
            numcells = int(D.shape[0] / downsample)
            rand_ind = np.random.choice(np.arange(D.shape[0]),
                                        size=numcells, replace=False)
            idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
        else:
            numcells = D.shape[0]

        mask_cells = np.zeros(D.shape[0], dtype='bool')
        mask_cells[idx_cells] = True

        self.adata = self.adata_raw[mask_cells,:].copy()
        D = self.adata.X
        if isinstance(D,np.ndarray):
            D=sp.csr_matrix(D,dtype='float32')
        else:
            D=D.astype('float32')
            D.sort_indices()

        if(D.getformat() == 'csc'):
            D=D.tocsr();

        # sum-normalize (skipped entirely for the multinomial transform,
        # which needs raw counts and normalizes later)
        if (sum_norm == 'cell_median' and norm != 'multinomial'):
            s = D.sum(1).A.flatten()
            sum_norm = np.median(s)
            D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
        elif (sum_norm == 'gene_median' and norm != 'multinomial'):
            s = D.sum(0).A.flatten()
            sum_norm = np.median(s)
            s[s==0]=1
            D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
        elif sum_norm is not None and norm != 'multinomial':
            D = D.multiply(1 / D.sum(1).A.flatten()[:,
                           None] * sum_norm).tocsr()

        # normalize
        self.adata.X = D
        if norm is None:
            D.data[:] = (D.data / div)
        elif(norm.lower() == 'log'):
            D.data[:] = np.log2(D.data / div + 1)
        elif(norm.lower() == 'ftt'):
            # Freeman-Tukey: sqrt(x) + sqrt(x+1)
            D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
        elif norm.lower() == 'multinomial':
            ni = D.sum(1).A.flatten()  # total counts per cell
            pj = (D.sum(0) / D.sum()).A.flatten()  # expected gene fractions
            col = D.indices
            # rebuild explicit row indices from the CSR index pointer
            row=[]
            for i in range(D.shape[0]):
                row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
            row = np.concatenate(row).astype('int32')
            # mu = expected counts under the multinomial model; the data
            # become Pearson residuals (observed - expected) / sd
            mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
            mu2 = mu.copy()
            mu2.data[:]=mu2.data**2
            mu2 = mu2.multiply(1/ni[:,None])
            mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)

            self.adata.X = mu
            if sum_norm is None:
                sum_norm = np.median(ni)
            # D keeps a log-normalized copy for display ('X_disp')
            D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
            D.data[:] = np.log2(D.data / div + 1)

        else:
            D.data[:] = (D.data / div)

        # zero-out low-expressed genes
        idx = np.where(D.data <= min_expression)[0]
        D.data[idx] = 0

        # filter genes: intersect include/exclude selections and the
        # expression-fraction window defined by 'thresh'
        gene_names = np.array(list(self.adata.var_names))
        idx_genes = np.arange(D.shape[1])
        if(include_genes is not None):
            include_genes = np.array(list(include_genes))
            idx = np.where(np.in1d(gene_names, include_genes))[0]
            idx_genes = np.array(list(set(idx) & set(idx_genes)))
        if(exclude_genes is not None):
            exclude_genes = np.array(list(exclude_genes))
            idx3 = np.where(np.in1d(gene_names, exclude_genes,
                                    invert=True))[0]
            idx_genes = np.array(list(set(idx3) & set(idx_genes)))
        if(filter_genes):
            # count cells expressing each gene from the sparse structure
            a, ct = np.unique(D.indices, return_counts=True)
            c = np.zeros(D.shape[1])
            c[a] = ct
            keep = np.where(np.logical_and(c / D.shape[0] > thresh,
                                           c / D.shape[0] <= 1 - thresh))[0]
            idx_genes = np.array(list(set(keep) & set(idx_genes)))

        mask_genes = np.zeros(D.shape[1], dtype='bool')
        mask_genes[idx_genes] = True

        # zero filtered genes in place (keeps the matrix dimensions intact)
        self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
        self.adata.X.eliminate_zeros()
        self.adata.var['mask_genes']=mask_genes

        if norm == 'multinomial':
            self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
            self.adata.layers['X_disp'].eliminate_zeros()
        else:
            self.adata.layers['X_disp'] = self.adata.X
    def load_data(self, filename, transpose=True,
                  save_sparse_file='h5ad', sep=',', **kwargs):
        """Loads the specified data file. The file can be a table of
        read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
        as columns by default. The file can also be a pickle file (output from
        'save_sparse_data') or an h5ad file (output from 'save_anndata').

        This function loads the file specified by 'filename'.

        Parameters
        ----------
        filename - string
            The path to the tabular raw expression counts file.

        sep - string, optional, default ','
            The delimeter used to read the input data table. By default
            assumes the input table is delimited by commas.

        save_sparse_file - str, optional, default 'h5ad'
            If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
            (the native AnnData file format) to the same folder as the original
            data for faster loading in the future. If 'p', pickles the sparse
            data structure, cell names, and gene names in the same folder as
            the original data for faster loading in the future.

        transpose - bool, optional, default True
            By default, assumes file is (genes x cells). Set this to False if
            the file has dimensions (cells x genes).
        """
        if filename.split('.')[-1] == 'p':
            # pickle produced by save_sparse_data: (matrix, cells, genes)
            raw_data, all_cell_names, all_gene_names = (
                pickle.load(open(filename, 'rb')))

            if(transpose):
                raw_data = raw_data.T
                if raw_data.getformat()=='csc':
                    print("Converting sparse matrix to csr format...")
                    raw_data=raw_data.tocsr()
            # already a fast-loading format; do not re-save
            save_sparse_file = None
        elif filename.split('.')[-1] != 'h5ad':
            # plain text table; read (genes x cells) unless transpose=False
            df = pd.read_csv(filename, sep=sep, index_col=0)
            if(transpose):
                dataset = df.T
            else:
                dataset = df
            raw_data = sp.csr_matrix(dataset.values)
            all_cell_names = np.array(list(dataset.index.values))
            all_gene_names = np.array(list(dataset.columns.values))

        if filename.split('.')[-1] != 'h5ad':
            self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
                                     var={'var_names': all_gene_names})
            # deduplicate names only when duplicates actually exist
            if(np.unique(all_gene_names).size != all_gene_names.size):
                self.adata_raw.var_names_make_unique()
            if(np.unique(all_cell_names).size != all_cell_names.size):
                self.adata_raw.obs_names_make_unique()
            self.adata = self.adata_raw.copy()
            self.adata.layers['X_disp'] = raw_data
        else:
            # native AnnData file; load directly
            self.adata_raw = anndata.read_h5ad(filename, **kwargs)
            self.adata = self.adata_raw.copy()
            if 'X_disp' not in list(self.adata.layers.keys()):
                self.adata.layers['X_disp'] = self.adata.X
            save_sparse_file = None

        # optionally cache a fast-loading copy next to the original file
        if(save_sparse_file == 'p'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_sparse_data(path + new_sparse_file + '_sparse.p')
        elif(save_sparse_file == 'h5ad'):
            new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
            path = filename[:filename.find(filename.split('/')[-1])]
            self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
    def load_annotations(self, aname, sep=','):
        """Loads cell annotations.

        Loads the cell annotations specified by the 'aname' path and stores
        them as pd.Categorical in both adata_raw.obs['annotations'] (all
        cells) and adata.obs['annotations'] (filtered cells).

        Parameters
        ----------
        aname - string
            The path to the annotations file. First column should be cell IDs
            and second column should be the desired annotations.

        sep - string, optional, default ','
            Field delimiter of the annotations file.
        """
        ann = pd.read_csv(aname)

        cell_names = np.array(list(self.adata.obs_names))
        all_cell_names = np.array(list(self.adata_raw.obs_names))

        # heuristically re-read with/without index and header columns until
        # the row count matches the number of cells in the raw data
        if(ann.shape[1] > 1):
            ann = pd.read_csv(aname, index_col=0, sep=sep)
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
        else:
            if(ann.shape[0] != all_cell_names.size):
                ann = pd.read_csv(aname, header=None, sep=sep)

        # normalize the index to strings so cell-name lookup works
        ann.index = np.array(list(ann.index.astype('<U100')))
        # ann1: annotations reordered to the filtered cells; ann2: all cells
        ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
        ann2 = np.array(list(ann.values.flatten()))
        self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
        self.adata.obs['annotations'] = pd.Categorical(ann1)
    def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
        """Computes the spatial dispersion factors for each gene.

        Parameters
        ----------
        nnm - scipy.sparse, float
            Square cell-to-cell nearest-neighbor matrix.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This ensures
            that outlier genes do not significantly skew the weight
            distribution.

        Returns:
        -------
        weights - ndarray, float
            The vector of gene weights (also stored in
            adata.var['weights']).
        """
        # average expression over each cell's neighborhood
        self.knn_avg(nnm)

        D_avg = self.adata.layers['X_knn_avg']

        # Fano-like factor (variance/mean) of the neighborhood-averaged data
        mu, var = sf.mean_variance_axis(D_avg, axis=0)

        dispersions = np.zeros(var.size)
        dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]

        self.adata.var['spatial_dispersions'] = dispersions.copy()

        # cap dispersions at the mean of the top 'num_norm_avg' values so
        # outlier genes cannot dominate the weights
        ma = np.sort(dispersions)[-num_norm_avg:].mean()
        dispersions[dispersions >= ma] = ma

        weights = ((dispersions / dispersions.max())**0.5).flatten()

        self.adata.var['weights'] = weights

        return weights
    def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
        """Computes the contribution of the gene IDs in 'genes' to each
        principal component (PC) of the filtered expression data as the mean of
        the absolute value of the corresponding gene loadings. High values
        correspond to PCs that are highly correlated with the features in
        'genes'. These PCs can then be regressed out of the data using
        'regress_genes'.

        Parameters
        ----------
        genes - numpy.array or list
            Genes for which contribution to each PC will be calculated.

        npcs - int, optional, default None
            How many PCs to calculate when computing PCA of the filtered and
            log-transformed expression data. If None, calculate all PCs.

        plot - bool, optional, default False
            If True, plot the scores reflecting how correlated each PC is with
            genes of interest. Otherwise, do not plot anything.

        Returns:
        -------
        x - numpy.array
            Scores reflecting how correlated each PC is with the genes of
            interest (ordered by decreasing eigenvalues). Returns None when
            'genes' is None (PCA results are still cached on the instance).
        """
        from sklearn.decomposition import PCA
        if npcs is None:
            npcs = self.adata.X.shape[0]

        pca = PCA(n_components=npcs)
        pc = pca.fit_transform(self.adata.X.toarray())

        # cache for later use by 'regress_genes'
        self.regression_pca = pca
        self.regression_pcs = pc

        gene_names = np.array(list(self.adata.var_names))
        if(genes is not None):
            idx = np.where(np.in1d(gene_names, genes))[0]
            sx = pca.components_[:, idx]
            # mean absolute loading of the selected genes per PC
            x = np.abs(sx).mean(1)

            if plot:
                plt.figure()
                plt.plot(x)

            return x
        else:
            return
def regress_genes(self, PCs):
"""Regress out the principal components in 'PCs' from the filtered
expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
been previously called.
Parameters
----------
PCs - int, numpy.array, list
The principal components to regress out of the expression data.
"""
ind = [PCs]
ind = np.array(ind).flatten()
try:
y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
self.regression_pca.components_[ind, :] * self.adata.var[
'weights'].values)
except BaseException:
y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
self.regression_pca.components_[ind, :])
self.adata.X = sp.csr_matrix(y)
    def run(self,
            max_iter=10,
            verbose=True,
            projection='umap',
            stopping_condition=5e-3,
            num_norm_avg=50,
            k=20,
            distance='correlation',
            preprocessing='Normalizer',
            proj_kwargs={}):
        """Runs the Self-Assembling Manifold algorithm.

        Parameters
        ----------
        k - int, optional, default 20
            The number of nearest neighbors to identify for each cell.

        distance : string, optional, default 'correlation'
            The distance metric to use when constructing cell distance
            matrices. Can be any of the distance metrics supported by
            sklearn's 'pdist'.

        max_iter - int, optional, default 10
            The maximum number of iterations SAM will run.

        stopping_condition - float, optional, default 5e-3
            The stopping condition threshold for the RMSE between gene weights
            in adjacent iterations.

        verbose - bool, optional, default True
            If True, the iteration number and error between gene weights in
            adjacent iterations will be displayed.

        projection - str, optional, default 'umap'
            If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
            embedding. Otherwise, no embedding will be generated.

        preprocessing - str, optional, default 'Normalizer'
            If 'Normalizer', use sklearn.preprocessing.Normalizer, which
            normalizes expression data prior to PCA such that each cell has
            unit L2 norm. If 'StandardScaler', use
            sklearn.preprocessing.StandardScaler, which normalizes expression
            data prior to PCA such that each gene has zero mean and unit
            variance. Otherwise, do not normalize the expression data. We
            recommend using 'StandardScaler' for large datasets and
            'Normalizer' otherwise.

        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This prevents
            genes with large spatial dispersions from skewing the distribution
            of weights.

        proj_kwargs - dict, optional, default {}
            A dictionary of keyword arguments to pass to the projection
            functions. (NOTE(review): mutable default argument -- safe only
            as long as callers never mutate it.)
        """
        self.distance = distance
        D = self.adata.X
        self.k = k
        # clamp k to [5, 100] and to at most numcells - 2
        if(self.k < 5):
            self.k = 5
        elif(self.k > 100):
            self.k = 100

        if(self.k > D.shape[0] - 1):
            self.k = D.shape[0] - 2

        numcells = D.shape[0]

        # heuristics: fewer genes and PCs for larger datasets
        n_genes = 8000
        if numcells > 3000 and n_genes > 3000:
            n_genes = 3000
        elif numcells > 2000 and n_genes > 4500:
            n_genes = 4500
        elif numcells > 1000 and n_genes > 6000:
            n_genes = 6000
        elif n_genes > 8000:
            n_genes = 8000

        npcs = None
        if npcs is None and numcells > 3000:
            npcs = 150
        elif npcs is None and numcells > 2000:
            npcs = 250
        elif npcs is None and numcells > 1000:
            npcs = 350
        elif npcs is None:
            npcs = 500

        tinit = time.time()

        # seed the first weight estimate with a random kNN graph in which
        # each cell is its own neighbor plus k-1 random cells
        edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        nums = np.arange(edm.shape[1])
        RINDS = np.random.randint(
            0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                           (self.k - 1)))
        RINDS = np.hstack((nums[:, None], RINDS))

        edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                    (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
        edm = edm.tocsr()

        print('RUNNING SAM')

        W = self.dispersion_ranking_NN(
            edm, num_norm_avg=1)

        old = np.zeros(W.size)
        new = W

        i = 0
        err = ((new - old)**2).mean()**0.5

        if max_iter < 5:
            max_iter = 5

        nnas = num_norm_avg
        # keep a history of neighbor graphs and weight vectors per iteration
        self.Ns=[edm]
        self.Ws = [W]
        # iterate: weights -> weighted PCA -> kNN graph -> new weights,
        # until the weight RMSE drops below the stopping condition
        while (i < max_iter and err > stopping_condition):
            conv = err
            if(verbose):
                print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))

            i += 1
            old = new

            W, wPCA_data, EDM, = self.calculate_nnm(
                D, W, n_genes, preprocessing, npcs, numcells, nnas)
            new = W
            err = ((new - old)**2).mean()**0.5

            self.Ns.append(EDM)
            self.Ws.append(W)

        # one final pass with the converged weights
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        self.Ns.append(EDM)

        all_gene_names = np.array(list(self.adata.var_names))
        indices = np.argsort(-W)
        ranked_genes = all_gene_names[indices]

        self.corr_bin_genes(number_of_features=1000)

        self.adata.uns['ranked_genes'] = ranked_genes

        self.adata.obsm['X_pca'] = wPCA_data

        self.adata.uns['neighbors'] = {}
        self.adata.uns['neighbors']['connectivities'] = EDM

        if(projection == 'tsne'):
            print('Computing the t-SNE embedding...')
            self.run_tsne(**proj_kwargs)
        elif(projection == 'umap'):
            print('Computing the UMAP embedding...')
            self.run_umap(**proj_kwargs)
        elif(projection == 'diff_umap'):
            print('Computing the diffusion UMAP embedding...')
            self.run_diff_umap(**proj_kwargs)

        elapsed = time.time() - tinit
        if verbose:
            print('Elapsed time: ' + str(elapsed) + ' seconds')
    def calculate_nnm(
            self,
            D,
            W,
            n_genes,
            preprocessing,
            npcs,
            numcells,
            num_norm_avg):
        """One SAM iteration: weight the expression data, run weighted PCA,
        build the kNN graph, and recompute the gene weights from it.

        Returns the new weight vector, the weighted PCA coordinates, and the
        sparse nearest-neighbor matrix.
        """
        if(n_genes is None):
            gkeep = np.arange(W.size)
        else:
            # restrict to the top-weighted genes
            gkeep = np.sort(np.argsort(-W)[:n_genes])

        if preprocessing == 'Normalizer':
            Ds = D[:, gkeep].toarray()
            Ds = Normalizer().fit_transform(Ds)

        elif preprocessing == 'StandardScaler':
            Ds = D[:, gkeep].toarray()
            Ds = StandardScaler(with_mean=True).fit_transform(Ds)
            # clip extreme z-scores
            Ds[Ds > 5] = 5
            Ds[Ds < -5] = -5

        else:
            Ds = D[:, gkeep].toarray()

        # rescale each gene by its current SAM weight
        D_sub = Ds * (W[gkeep])

        # randomized PCA solver only pays off for larger datasets
        if numcells > 500:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='auto')
        else:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='full')

        if self.distance == 'euclidean':
            # unit-normalize so euclidean distance behaves like cosine
            g_weighted = Normalizer().fit_transform(g_weighted)

        self.adata.uns['pca_obj'] = pca
        EDM = self.calc_nnm(g_weighted)

        W = self.dispersion_ranking_NN(
            EDM, num_norm_avg=num_norm_avg)

        self.adata.uns['X_processed'] = D_sub

        return W, g_weighted, EDM
def calc_nnm(self,g_weighted):
numcells=g_weighted.shape[0]
if g_weighted.shape[0] > 8000:
nnm, dists = ut.nearest_neighbors(
g_weighted, n_neighbors=self.k, metric=self.distance)
EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
EDM[np.tile(np.arange(nnm.shape[0])[:, None],
(1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
EDM = EDM.tocsr()
else:
dist = ut.compute_distances(g_weighted, self.distance)
nnm = ut.dist_to_nn(dist, self.k)
EDM = sp.csr_matrix(nnm)
return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
    def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
        """Plots orthogonal expression patterns.

        In the default mode, plots orthogonal gene expression patterns. A
        specific correlated group of genes can be specified to plot gene
        expression patterns within that group.

        Parameters
        ----------
        group - int, optional, default None
            If specified, display the genes within the desired correlated
            group. Otherwise, display the top ranked gene within each distinct
            correlated group.

        n_genes - int, optional, default 5
            The number of top ranked genes to display within a correlated
            group if 'group' is specified.

        **kwargs -
            All keyword arguments in 'show_gene_expression' and 'scatter'
            are eligible.
        """
        # gene groups computed by corr_bin_genes during 'run'
        geneID_groups = self.adata.uns['gene_groups']
        if(group is None):
            # show the top-ranked gene of each group
            for i in range(len(geneID_groups)):
                self.show_gene_expression(geneID_groups[i][0], **kwargs)
        else:
            # show the top n_genes genes of the requested group
            for i in range(n_genes):
                self.show_gene_expression(geneID_groups[group][i], **kwargs)
def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:]
    def corr_bin_genes(self, number_of_features=None, input_gene=None):
        """A (hacky) method for binning groups of genes correlated along the
        SAM manifold.

        Greedily grows groups ("seeds") of correlated genes: each of the
        top-weighted genes joins the existing group it is most correlated
        with, provided that correlation exceeds the gene's mean positive
        correlation; otherwise it starts a new group. Correlations are
        computed on the kNN-averaged expression ('X_knn_avg').

        Parameters
        ----------
        number_of_features - int, optional, default None
            The number of genes to bin. Capped at 1000 due to memory
            considerations.
        input_gene - str, optional, default None
            If not None, use this gene as the first seed when growing the
            correlation bins; only that gene's group is returned and
            nothing is written to ``adata.uns``.
        """
        weights = self.adata.var['spatial_dispersions'].values
        all_gene_names = np.array(list(self.adata.var_names))
        D_avg = self.adata.layers['X_knn_avg']

        # Gene indices sorted by decreasing dispersion, restricted to genes
        # with positive dispersion.
        idx2 = np.argsort(-weights)[:weights[weights > 0].size]

        if(number_of_features is None or number_of_features > idx2.size):
            number_of_features = idx2.size

        if number_of_features > 1000:
            number_of_features = 1000

        if(input_gene is not None):
            input_gene = np.where(all_gene_names == input_gene)[0]
            if(input_gene.size == 0):
                # NOTE(review): "note" is a typo for "not" in this message.
                print(
                    "Gene note found in the filtered dataset. Note "
                    "that genes are case sensitive.")
                return
            seeds = [np.array([input_gene])]
            # Pairwise gene-gene correlations over the top features.
            pw_corr = np.corrcoef(
                D_avg[:, idx2[:number_of_features]].T.toarray())
            for i in range(1, number_of_features):
                flag = False
                # Baseline: gene i's mean positive correlation.
                maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
                maxi = 0
                for j in range(len(seeds)):
                    # NOTE(review): seeds hold indices into the full gene
                    # array while pw_corr rows are positions within the
                    # top-'number_of_features' subset; np.where(idx2 == ...)
                    # maps between the two and yields an empty selection if
                    # the seed lies outside the subset -- confirm intended.
                    if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                       > maxd):
                        maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                        maxi = j
                        flag = True
                if(not flag):
                    # Not similar enough to any group: start a new one.
                    seeds.append(np.array([idx2[i]]))
                else:
                    seeds[maxi] = np.append(seeds[maxi], idx2[i])

            geneID_groups = []
            for i in range(len(seeds)):
                geneID_groups.append(all_gene_names[seeds[i]])
            # Only the group seeded by the input gene is returned.
            return geneID_groups[0]
        else:
            # Same greedy grouping, seeded by the top-dispersion gene.
            seeds = [np.array([idx2[0]])]
            pw_corr = np.corrcoef(
                D_avg[:, idx2[:number_of_features]].T.toarray())
            for i in range(1, number_of_features):
                flag = False
                maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
                maxi = 0
                for j in range(len(seeds)):
                    if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                       > maxd):
                        maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
                        maxi = j
                        flag = True
                if(not flag):
                    seeds.append(np.array([idx2[i]]))
                else:
                    seeds[maxi] = np.append(seeds[maxi], idx2[i])

            geneID_groups = []
            for i in range(len(seeds)):
                geneID_groups.append(
                    all_gene_names[seeds[i]])

            self.adata.uns['gene_groups'] = geneID_groups
            return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
"""Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' is set to 'precomputed' by default,
implying that this function expects a distance matrix by default.
"""
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d
def run_umap(self, X=None, metric=None, **kwargs):
"""Wrapper for umap-learn.
See https://github.com/lmcinnes/umap sklearn for the documentation
and source code.
"""
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d
    def run_diff_umap(self,use_rep='X_pca', metric='euclidean', n_comps=15,
                      method='gauss', **kwargs):
        """
        Experimental -- running UMAP on the diffusion components

        Builds a kNN graph on ``use_rep``, computes ``n_comps`` diffusion
        components with scanpy, re-builds the kNN graph in diffusion space,
        and embeds it with UMAP into ``adata.obsm['X_umap']``. Any
        pre-existing UMAP embedding is preserved in
        ``adata.obsm['X_umap_sam']``.

        NOTE(review): the ``metric`` and ``**kwargs`` parameters are
        currently unused by the body ('euclidean' and self.distance are
        hard-coded below) -- confirm intended.
        """
        import scanpy.api as sc
        # Graph on the input representation using SAM's distance metric.
        sc.pp.neighbors(self.adata,use_rep=use_rep,n_neighbors=self.k,
                        metric=self.distance,method=method)
        sc.tl.diffmap(self.adata, n_comps=n_comps)
        # Re-compute neighbors in diffusion space (euclidean).
        sc.pp.neighbors(self.adata,use_rep='X_diffmap',n_neighbors=self.k,
                        metric='euclidean',method=method)

        # Keep the original SAM UMAP embedding before scanpy overwrites it.
        if 'X_umap' in self.adata.obsm.keys():
            self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']

        sc.tl.umap(self.adata,min_dist=0.1,copy=False)
def knn_avg(self, nnm=None):
if (nnm is None):
nnm = self.adata.uns['neighbors']['connectivities']
D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])
self.adata.layers['X_knn_avg'] = D_avg
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
"""Display a scatter plot.
Displays a scatter plot using the SAM projection or another input
projection with or without annotations.
Parameters
----------
projection - ndarray of floats, optional, default None
An N x 2 matrix, where N is the number of data points. If None,
use an existing SAM projection (default t-SNE). Can take on values
'umap' or 'tsne' to specify either the SAM UMAP embedding or
SAM t-SNE embedding.
c - ndarray or str, optional, default None
Colors for each cell in the scatter plot. Can be a vector of
floats or strings for cell annotations. Can also be a key
for sam.adata.obs (i.e. 'louvain_clusters').
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
cmap - string, optional, default 'rainbow'
The colormap to use for the input color values.
colorbar - bool, optional default True
If True, display a colorbar indicating which values / annotations
correspond to which color in the scatter plot.
Keyword arguments -
All other keyword arguments that can be passed into
matplotlib.pyplot.scatter can be used.
"""
if (not PLOTTING):
print("matplotlib not installed!")
else:
if(isinstance(projection, str)):
try:
dt = self.adata.obsm[projection]
except KeyError:
print('Please create a projection first using run_umap or'
'run_tsne')
elif(projection is None):
try:
dt = self.adata.obsm['X_umap']
except KeyError:
try:
dt = self.adata.obsm['X_tsne']
except KeyError:
print("Please create either a t-SNE or UMAP projection"
"first.")
return
else:
dt = projection
if(axes is None):
plt.figure()
axes = plt.gca()
if(c is None):
plt.scatter(dt[:, 0], dt[:, 1], s=s,
linewidth=linewidth, edgecolor=edgecolor, **kwargs)
else:
if isinstance(c, str):
try:
c = self.adata.obs[c].get_values()
except KeyError:
0 # do nothing
if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
(isinstance(c, np.ndarray) or isinstance(c, list))):
i = ut.convert_annotations(c)
ui, ai = np.unique(i, return_index=True)
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
cbar = plt.colorbar(cax, ax=axes, ticks=ui)
cbar.ax.set_yticklabels(c[ai])
else:
if not (isinstance(c, np.ndarray) or isinstance(c, list)):
colorbar = False
i = c
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name)
    def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
        """Cluster cells with DBSCAN and reassign noise points.

        Runs sklearn's DBSCAN on ``X`` (the UMAP embedding by default) and
        then assigns each noise point (label -1) to the cluster most
        represented among its k nearest non-noise neighbors.

        Parameters
        ----------
        X - ndarray, optional, default None
            Coordinates to cluster. If None, uses ``adata.obsm['X_umap']``
            and stores the labels in ``adata.obs['density_clusters']``;
            otherwise the labels are returned.
        eps - float, optional, default 1
            DBSCAN neighborhood radius.
        metric - str, optional, default 'euclidean'
            Distance metric passed to DBSCAN.
        **kwargs -
            Additional keyword arguments forwarded to DBSCAN.
        """
        from sklearn.cluster import DBSCAN
        if X is None:
            X = self.adata.obsm['X_umap']
            save = True
        else:
            save = False

        cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)

        idx0 = np.where(cl != -1)[0]  # clustered cells
        idx1 = np.where(cl == -1)[0]  # noise cells
        if idx1.size > 0 and idx0.size > 0:
            # Distances from clustered cells (rows) to noise cells (cols).
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            # k nearest clustered neighbors of each noise cell.
            knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
            # Binary (noise x clustered) neighbor-indicator matrix.
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            # Votes per cluster among each noise cell's neighbors.
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

            # Majority vote assigns each noise cell to a cluster.
            cl[idx1] = np.argmax(nnmc, axis=1)

        if save:
            self.adata.obs['density_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def louvain_clustering(self, X=None, res=1, method='modularity'):
        """Runs Louvain clustering using the vtraag implementation. Assumes
        that 'louvain' optional dependency is installed.

        Parameters
        ----------
        X - sparse matrix, optional, default None
            Adjacency/affinity matrix to cluster. If None, the stored
            neighbor connectivities are used and the labels are written to
            ``adata.obs['louvain_clusters']``; otherwise the labels are
            returned.
        res - float, optional, default 1
            The resolution parameter which tunes the number of clusters Louvain
            finds. Only used when method is 'modularity'.
        method - str, optional, default 'modularity'
            Can be 'modularity' or 'significance', which are two different
            optimizing functions in the Louvain algorithm.
        """
        if X is None:
            X = self.adata.uns['neighbors']['connectivities']
            save = True
        else:
            if not sp.isspmatrix_csr(X):
                X = sp.csr_matrix(X)
            save = False

        import igraph as ig
        import louvain

        # Shared-neighbor affinity (X.X^T / k), re-sparsified to the top k
        # entries per row.
        adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
        sources, targets = adjacency.nonzero()
        weights = adjacency[sources, targets]
        if isinstance(weights, np.matrix):
            # Fancy indexing on sparse matrices yields np.matrix; flatten it.
            weights = weights.A1
        g = ig.Graph(directed=True)
        g.add_vertices(adjacency.shape[0])
        g.add_edges(list(zip(sources, targets)))
        try:
            g.es['weight'] = weights
        except BaseException:
            # Best effort: fall back to an unweighted graph.
            pass

        if method == 'significance':
            cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
        else:
            cl = louvain.find_partition(
                g,
                louvain.RBConfigurationVertexPartition,
                resolution_parameter=res)

        if save:
            self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
        else:
            return np.array(cl.membership)
def kmeans_clustering(self, numc, X=None, npcs=15):
"""Performs k-means clustering.
Parameters
----------
numc - int
Number of clusters
npcs - int, optional, default 15
Number of principal components to use as inpute for k-means
clustering.
"""
from sklearn.cluster import KMeans
if X is None:
D_sub = self.adata.uns['X_processed']
X = (
D_sub -
D_sub.mean(0)).dot(
self.adata.uns['pca_obj'].components_[
:npcs,
:].T)
save = True
else:
save = False
cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
if save:
self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
else:
return cl
def leiden_clustering(self, X=None, res = 1):
import scanpy.api as sc
if X is None:
sc.tl.leiden(self.adata, resolution = res,
key_added='leiden_clusters')
self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
'leiden_clusters'].get_values().astype('int'))
else:
sc.tl.leiden(self.adata, resolution = res, adjacency = X,
key_added='leiden_clusters_X')
self.adata.obs['leiden_clusters_X'] =pd.Categorical(self.adata.obs[
'leiden_clusters_X'].get_values().astype('int'))
    def hdbknn_clustering(self, X=None, k=None, **kwargs):
        """Cluster cells with HDBSCAN and reassign noise points.

        Runs HDBSCAN on ``X`` (by default, an L2-normalized 15-component PCA
        projection of the processed data) and then assigns each noise point
        (label -1) to the cluster most represented among its k nearest
        non-noise neighbors.

        Parameters
        ----------
        X - ndarray, optional, default None
            Coordinates to cluster. If None, the default projection is used
            and the labels are stored in ``adata.obs['hdbknn_clusters']``;
            otherwise the labels are returned.
        k - int, optional, default None
            Number of neighbors used when reassigning noise points.
            Defaults to 20.
        **kwargs -
            Additional keyword arguments forwarded to hdbscan.HDBSCAN.
        """
        import hdbscan
        if X is None:
            #X = self.adata.obsm['X_pca']
            # Project processed data onto the first 15 PCs, then
            # L2-normalize each cell.
            D = self.adata.uns['X_processed']
            X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
            X = Normalizer().fit_transform(X)
            save = True
        else:
            save = False

        if k is None:
            k = 20#self.k

        hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
        cl = hdb.fit_predict(X)

        idx0 = np.where(cl != -1)[0]  # clustered cells
        idx1 = np.where(cl == -1)[0]  # noise cells
        if idx1.size > 0 and idx0.size > 0:
            # Distances from clustered cells (rows) to noise cells (cols).
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            # k nearest clustered neighbors of each noise cell.
            knn = np.argsort(xcmap.T, axis=1)[:, :k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            # Votes per cluster among each noise cell's neighbors.
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)

            # Majority vote assigns each noise cell to a cluster.
            cl[idx1] = np.argmax(nnmc, axis=1)

        if save:
            self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
        else:
            return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
"""
Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers
    def identify_marker_genes_corr(self, labels=None, n_genes=4000):
        """
        Ranking marker genes based on their respective magnitudes in the
        correlation dot products with cluster-specific reference expression
        profiles.

        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.
        n_genes - int, optional, default 4000
            By default, computes correlations on the top 4000 SAM-weighted genes.

        Returns
        -------
        markers - dict
            Maps each cluster label to its gene names ranked by decreasing
            agreement with the cluster's reference profile. Also stored in
            ``adata.uns['marker_genes_corr']``.
        """
        if(labels is None):
            try:
                # Fall back to the first '*_clusters' column in adata.obs.
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels

        w=self.adata.var['weights'].values
        s = StandardScaler()
        # Restrict to the n_genes genes with the largest SAM weights.
        idxg = np.argsort(-w)[:n_genes]
        # Z-score each gene across cells, then re-scale by its SAM weight.
        y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
        all_gene_names = np.array(list(self.adata.var_names))[idxg]

        markers = {}
        lblsu=np.unique(lbls)
        for i in lblsu:
            # Cells belonging to cluster i.
            Gcells = np.array(list(self.adata.obs_names[lbls==i]))
            z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
            # Z-score each cell's profile across genes.
            m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]

            # Standardized cluster reference profile (mean over its cells).
            ref = z1.mean(0)
            ref = (ref-ref.mean())/ref.std()
            # Mean per-gene agreement with the reference across cells.
            g2 = (m1*ref).mean(0)
            markers[i] = all_gene_names[np.argsort(-g2)]

        self.adata.uns['marker_genes_corr'] = markers
        return markers
|
atarashansky/self-assembling-manifold | SAM.py | SAM.identify_marker_genes_corr | python | def identify_marker_genes_corr(self, labels=None, n_genes=4000):
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
w=self.adata.var['weights'].values
s = StandardScaler()
idxg = np.argsort(-w)[:n_genes]
y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
all_gene_names = np.array(list(self.adata.var_names))[idxg]
markers = {}
lblsu=np.unique(lbls)
for i in lblsu:
Gcells = np.array(list(self.adata.obs_names[lbls==i]))
z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
ref = z1.mean(0)
ref = (ref-ref.mean())/ref.std()
g2 = (m1*ref).mean(0)
markers[i] = all_gene_names[np.argsort(-g2)]
self.adata.uns['marker_genes_corr'] = markers
return markers | Ranking marker genes based on their respective magnitudes in the
correlation dot products with cluster-specific reference expression
profiles.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
n_genes - int, optional, default 4000
By default, computes correlations on the top 4000 SAM-weighted genes. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1521-L1575 | [
"def search_string(vec, s):\n m = []\n s = s.lower()\n for i in range(len(vec)):\n st = vec[i].lower()\n b = st.find(s)\n if(b != -1):\n m.append(i)\n if(len(m) > 0):\n return vec[np.array(m)], np.array(m)\n else:\n return [-1, -1]\n"
] | class SAM(object):
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
Parameters
----------
counts : tuple or list (scipy.sparse matrix, numpy.ndarray,numpy.ndarray),
OR tuple or list (numpy.ndarray, numpy.ndarray,numpy.ndarray), OR
pandas.DataFrame, OR anndata.AnnData
If a tuple or list, it should contain the gene expression data
(scipy.sparse or numpy.ndarray) matrix (cells x genes), numpy array of
gene IDs, and numpy array of cell IDs in that order.
If a pandas.DataFrame, it should be (cells x genes)
Only use this argument if you want to pass in preloaded data. Otherwise
use one of the load functions.
annotations : numpy.ndarray, optional, default None
A Numpy array of cell annotations.
Attributes
----------
k: int
The number of nearest neighbors to identify for each cell
when constructing the nearest neighbor graph.
distance: str
The distance metric used when constructing the cell-to-cell
distance matrix.
adata_raw: AnnData
An AnnData object containing the raw, unfiltered input data.
adata: AnnData
An AnnData object containing all processed data and SAM outputs.
"""
def __init__(self, counts=None, annotations=None):
if isinstance(counts, tuple) or isinstance(counts, list):
raw_data, all_gene_names, all_cell_names = counts
if isinstance(raw_data, np.ndarray):
raw_data = sp.csr_matrix(raw_data)
self.adata_raw = AnnData(
X=raw_data, obs={
'obs_names': all_cell_names}, var={
'var_names': all_gene_names})
elif isinstance(counts, pd.DataFrame):
raw_data = sp.csr_matrix(counts.values)
all_gene_names = np.array(list(counts.columns.values))
all_cell_names = np.array(list(counts.index.values))
self.adata_raw = AnnData(
X=raw_data, obs={
'obs_names': all_cell_names}, var={
'var_names': all_gene_names})
elif isinstance(counts, AnnData):
all_cell_names=np.array(list(counts.obs_names))
all_gene_names=np.array(list(counts.var_names))
self.adata_raw = counts
elif counts is not None:
raise Exception(
"\'counts\' must be either a tuple/list of "
"(data,gene IDs,cell IDs) or a Pandas DataFrame of"
"cells x genes")
if(annotations is not None):
annotations = np.array(list(annotations))
if counts is not None:
self.adata_raw.obs['annotations'] = pd.Categorical(annotations)
if(counts is not None):
if(np.unique(all_gene_names).size != all_gene_names.size):
self.adata_raw.var_names_make_unique()
if(np.unique(all_cell_names).size != all_cell_names.size):
self.adata_raw.obs_names_make_unique()
self.adata = self.adata_raw.copy()
self.adata.layers['X_disp'] = self.adata.X
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
include_genes=None, exclude_genes=None,
include_cells=None, exclude_cells=None,
norm='log', min_expression=1, thresh=0.01,
filter_genes=True):
"""Log-normalizes and filters the expression data.
Parameters
----------
div : float, optional, default 1
The factor by which the gene expression will be divided prior to
log normalization.
downsample : float, optional, default 0
The factor by which to randomly downsample the data. If 0, the
data will not be downsampled.
sum_norm : str or float, optional, default None
If a float, the total number of transcripts in each cell will be
normalized to this value prior to normalization and filtering.
Otherwise, nothing happens. If 'cell_median', each cell is
normalized to have the median total read count per cell. If
'gene_median', each gene is normalized to have the median total
read count per gene.
norm : str, optional, default 'log'
If 'log', log-normalizes the expression data. If 'ftt', applies the
Freeman-Tukey variance-stabilization transformation. If
'multinomial', applies the Pearson-residual transformation (this is
experimental and should only be used for raw, un-normalized UMI
datasets). If None, the data is not normalized.
include_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to keep.
All other genes will be filtered out. Gene names are case-
sensitive.
exclude_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to
exclude. These genes will be filtered out. Gene names are case-
sensitive.
include_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to keep.
All other cells will be filtered out. Cell names are
case-sensitive.
exclude_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to exclude.
Thses cells will be filtered out. Cell names are
case-sensitive.
min_expression : float, optional, default 1
The threshold above which a gene is considered
expressed. Gene expression values less than 'min_expression' are
set to zero.
thresh : float, optional, default 0.2
Keep genes expressed in greater than 'thresh'*100 % of cells and
less than (1-'thresh')*100 % of cells, where a gene is considered
expressed if its expression value exceeds 'min_expression'.
filter_genes : bool, optional, default True
Setting this to False turns off filtering operations aside from
removing genes with zero expression across all cells. Genes passed
in exclude_genes or not passed in include_genes will still be
filtered.
"""
# load data
try:
D= self.adata_raw.X
self.adata = self.adata_raw.copy()
except AttributeError:
print('No data loaded')
# filter cells
cell_names = np.array(list(self.adata_raw.obs_names))
idx_cells = np.arange(D.shape[0])
if(include_cells is not None):
include_cells = np.array(list(include_cells))
idx2 = np.where(np.in1d(cell_names, include_cells))[0]
idx_cells = np.array(list(set(idx2) & set(idx_cells)))
if(exclude_cells is not None):
exclude_cells = np.array(list(exclude_cells))
idx4 = np.where(np.in1d(cell_names, exclude_cells,
invert=True))[0]
idx_cells = np.array(list(set(idx4) & set(idx_cells)))
if downsample > 0:
numcells = int(D.shape[0] / downsample)
rand_ind = np.random.choice(np.arange(D.shape[0]),
size=numcells, replace=False)
idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
else:
numcells = D.shape[0]
mask_cells = np.zeros(D.shape[0], dtype='bool')
mask_cells[idx_cells] = True
self.adata = self.adata_raw[mask_cells,:].copy()
D = self.adata.X
if isinstance(D,np.ndarray):
D=sp.csr_matrix(D,dtype='float32')
else:
D=D.astype('float32')
D.sort_indices()
if(D.getformat() == 'csc'):
D=D.tocsr();
# sum-normalize
if (sum_norm == 'cell_median' and norm != 'multinomial'):
s = D.sum(1).A.flatten()
sum_norm = np.median(s)
D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
elif (sum_norm == 'gene_median' and norm != 'multinomial'):
s = D.sum(0).A.flatten()
sum_norm = np.median(s)
s[s==0]=1
D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
elif sum_norm is not None and norm != 'multinomial':
D = D.multiply(1 / D.sum(1).A.flatten()[:,
None] * sum_norm).tocsr()
# normalize
self.adata.X = D
if norm is None:
D.data[:] = (D.data / div)
elif(norm.lower() == 'log'):
D.data[:] = np.log2(D.data / div + 1)
elif(norm.lower() == 'ftt'):
D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
elif norm.lower() == 'multinomial':
ni = D.sum(1).A.flatten() #cells
pj = (D.sum(0) / D.sum()).A.flatten() #genes
col = D.indices
row=[]
for i in range(D.shape[0]):
row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
row = np.concatenate(row).astype('int32')
mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
mu2 = mu.copy()
mu2.data[:]=mu2.data**2
mu2 = mu2.multiply(1/ni[:,None])
mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
self.adata.X = mu
if sum_norm is None:
sum_norm = np.median(ni)
D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
D.data[:] = np.log2(D.data / div + 1)
else:
D.data[:] = (D.data / div)
# zero-out low-expressed genes
idx = np.where(D.data <= min_expression)[0]
D.data[idx] = 0
# filter genes
gene_names = np.array(list(self.adata.var_names))
idx_genes = np.arange(D.shape[1])
if(include_genes is not None):
include_genes = np.array(list(include_genes))
idx = np.where(np.in1d(gene_names, include_genes))[0]
idx_genes = np.array(list(set(idx) & set(idx_genes)))
if(exclude_genes is not None):
exclude_genes = np.array(list(exclude_genes))
idx3 = np.where(np.in1d(gene_names, exclude_genes,
invert=True))[0]
idx_genes = np.array(list(set(idx3) & set(idx_genes)))
if(filter_genes):
a, ct = np.unique(D.indices, return_counts=True)
c = np.zeros(D.shape[1])
c[a] = ct
keep = np.where(np.logical_and(c / D.shape[0] > thresh,
c / D.shape[0] <= 1 - thresh))[0]
idx_genes = np.array(list(set(keep) & set(idx_genes)))
mask_genes = np.zeros(D.shape[1], dtype='bool')
mask_genes[idx_genes] = True
self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
self.adata.X.eliminate_zeros()
self.adata.var['mask_genes']=mask_genes
if norm == 'multinomial':
self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
self.adata.layers['X_disp'].eliminate_zeros()
else:
self.adata.layers['X_disp'] = self.adata.X
def load_data(self, filename, transpose=True,
save_sparse_file='h5ad', sep=',', **kwargs):
"""Loads the specified data file. The file can be a table of
read counts (i.e. '.csv' or '.txt'), with genes as rows and cells
as columns by default. The file can also be a pickle file (output from
'save_sparse_data') or an h5ad file (output from 'save_anndata').
This function that loads the file specified by 'filename'.
Parameters
----------
filename - string
The path to the tabular raw expression counts file.
sep - string, optional, default ','
The delimeter used to read the input data table. By default
assumes the input table is delimited by commas.
save_sparse_file - str, optional, default 'h5ad'
If 'h5ad', writes the SAM 'adata_raw' object to a h5ad file
(the native AnnData file format) to the same folder as the original
data for faster loading in the future. If 'p', pickles the sparse
data structure, cell names, and gene names in the same folder as
the original data for faster loading in the future.
transpose - bool, optional, default True
By default, assumes file is (genes x cells). Set this to False if
the file has dimensions (cells x genes).
"""
if filename.split('.')[-1] == 'p':
raw_data, all_cell_names, all_gene_names = (
pickle.load(open(filename, 'rb')))
if(transpose):
raw_data = raw_data.T
if raw_data.getformat()=='csc':
print("Converting sparse matrix to csr format...")
raw_data=raw_data.tocsr()
save_sparse_file = None
elif filename.split('.')[-1] != 'h5ad':
df = pd.read_csv(filename, sep=sep, index_col=0)
if(transpose):
dataset = df.T
else:
dataset = df
raw_data = sp.csr_matrix(dataset.values)
all_cell_names = np.array(list(dataset.index.values))
all_gene_names = np.array(list(dataset.columns.values))
if filename.split('.')[-1] != 'h5ad':
self.adata_raw = AnnData(X=raw_data, obs={'obs_names': all_cell_names},
var={'var_names': all_gene_names})
if(np.unique(all_gene_names).size != all_gene_names.size):
self.adata_raw.var_names_make_unique()
if(np.unique(all_cell_names).size != all_cell_names.size):
self.adata_raw.obs_names_make_unique()
self.adata = self.adata_raw.copy()
self.adata.layers['X_disp'] = raw_data
else:
self.adata_raw = anndata.read_h5ad(filename, **kwargs)
self.adata = self.adata_raw.copy()
if 'X_disp' not in list(self.adata.layers.keys()):
self.adata.layers['X_disp'] = self.adata.X
save_sparse_file = None
if(save_sparse_file == 'p'):
new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
path = filename[:filename.find(filename.split('/')[-1])]
self.save_sparse_data(path + new_sparse_file + '_sparse.p')
elif(save_sparse_file == 'h5ad'):
new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
path = filename[:filename.find(filename.split('/')[-1])]
self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def save_sparse_data(self, fname):
"""Saves the tuple (raw_data,all_cell_names,all_gene_names) in a
Pickle file.
Parameters
----------
fname - string
The filename of the output file.
"""
data = self.adata_raw.X.T
if data.getformat()=='csr':
data=data.tocsc()
cell_names = np.array(list(self.adata_raw.obs_names))
gene_names = np.array(list(self.adata_raw.var_names))
pickle.dump((data, cell_names, gene_names), open(fname, 'wb'))
def save_anndata(self, fname, data = 'adata_raw', **kwargs):
"""Saves `adata_raw` to a .h5ad file (AnnData's native file format).
Parameters
----------
fname - string
The filename of the output file.
"""
x = self.__dict__[data]
x.write_h5ad(fname, **kwargs)
def load_annotations(self, aname, sep=','):
"""Loads cell annotations.
Loads the cell annoations specified by the 'aname' path.
Parameters
----------
aname - string
The path to the annotations file. First column should be cell IDs
and second column should be the desired annotations.
"""
ann = pd.read_csv(aname)
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
if(ann.shape[1] > 1):
ann = pd.read_csv(aname, index_col=0, sep=sep)
if(ann.shape[0] != all_cell_names.size):
ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
else:
if(ann.shape[0] != all_cell_names.size):
ann = pd.read_csv(aname, header=None, sep=sep)
ann.index = np.array(list(ann.index.astype('<U100')))
ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
ann2 = np.array(list(ann.values.flatten()))
self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
self.adata.obs['annotations'] = pd.Categorical(ann1)
    def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
        """Computes the spatial dispersion factors for each gene.
        Parameters
        ----------
        nnm - scipy.sparse, float
            Square cell-to-cell nearest-neighbor matrix.
        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This ensures
            that outlier genes do not significantly skew the weight
            distribution.
        Returns:
        -------
        weights - ndarray, float
            The vector of gene weights (also stored in
            adata.var['weights']). Note: unlike an earlier version, the
            sorted indices are NOT returned.
        """
        # Smooth expression over each cell's neighbors, then compute the
        # Fano factor (var/mean) of the smoothed values per gene.
        self.knn_avg(nnm)
        D_avg = self.adata.layers['X_knn_avg']
        mu, var = sf.mean_variance_axis(D_avg, axis=0)
        dispersions = np.zeros(var.size)
        dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
        self.adata.var['spatial_dispersions'] = dispersions.copy()
        # Cap dispersions at the mean of the top num_norm_avg values so a
        # handful of outlier genes cannot dominate the weights.
        ma = np.sort(dispersions)[-num_norm_avg:].mean()
        dispersions[dispersions >= ma] = ma
        weights = ((dispersions / dispersions.max())**0.5).flatten()
        self.adata.var['weights'] = weights
        return weights
    def calculate_regression_PCs(self, genes=None, npcs=None, plot=False):
        """Computes the contribution of the gene IDs in 'genes' to each
        principal component (PC) of the filtered expression data as the mean of
        the absolute value of the corresponding gene loadings. High values
        correspond to PCs that are highly correlated with the features in
        'genes'. These PCs can then be regressed out of the data using
        'regress_genes'.
        Parameters
        ----------
        genes - numpy.array or list
            Genes for which contribution to each PC will be calculated.
        npcs - int, optional, default None
            How many PCs to calculate when computing PCA of the filtered and
            log-transformed expression data. If None, calculate all PCs.
        plot - bool, optional, default False
            If True, plot the scores reflecting how correlated each PC is with
            genes of interest. Otherwise, do not plot anything.
        Returns:
        -------
        x - numpy.array
            Scores reflecting how correlated each PC is with the genes of
            interest (ordered by decreasing eigenvalues). Returns None when
            'genes' is None (the PCA is still stored on the instance).
        """
        from sklearn.decomposition import PCA
        if npcs is None:
            npcs = self.adata.X.shape[0]
        pca = PCA(n_components=npcs)
        pc = pca.fit_transform(self.adata.X.toarray())
        # Stored for later use by regress_genes.
        self.regression_pca = pca
        self.regression_pcs = pc
        gene_names = np.array(list(self.adata.var_names))
        if(genes is not None):
            idx = np.where(np.in1d(gene_names, genes))[0]
            sx = pca.components_[:, idx]
            # Mean absolute loading of the selected genes on each PC.
            x = np.abs(sx).mean(1)
            if plot:
                plt.figure()
                plt.plot(x)
            return x
        else:
            return
    def regress_genes(self, PCs):
        """Regress out the principal components in 'PCs' from the filtered
        expression data ('SAM.D'). Assumes 'calculate_regression_PCs' has
        been previously called.
        Parameters
        ----------
        PCs - int, numpy.array, list
            The principal components to regress out of the expression data.
        """
        # Normalize PCs into a flat integer index array regardless of
        # whether a scalar, list, or ndarray was passed.
        ind = [PCs]
        ind = np.array(ind).flatten()
        # Prefer weight-scaled loadings; fall back to raw loadings when the
        # scaled computation fails (e.g. the 'weights' column is missing).
        # NOTE(review): the broad BaseException here silently masks any
        # failure in the first expression -- confirm this is intentional.
        try:
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :] * self.adata.var[
                    'weights'].values)
        except BaseException:
            y = self.adata.X.toarray() - self.regression_pcs[:, ind].dot(
                self.regression_pca.components_[ind, :])
        self.adata.X = sp.csr_matrix(y)
    def run(self,
            max_iter=10,
            verbose=True,
            projection='umap',
            stopping_condition=5e-3,
            num_norm_avg=50,
            k=20,
            distance='correlation',
            preprocessing='Normalizer',
            proj_kwargs={}):
        """Runs the Self-Assembling Manifold algorithm.
        Parameters
        ----------
        k - int, optional, default 20
            The number of nearest neighbors to identify for each cell.
        distance : string, optional, default 'correlation'
            The distance metric to use when constructing cell distance
            matrices. Can be any of the distance metrics supported by
            sklearn's 'pdist'.
        max_iter - int, optional, default 10
            The maximum number of iterations SAM will run.
        stopping_condition - float, optional, default 5e-3
            The stopping condition threshold for the RMSE between gene weights
            in adjacent iterations.
        verbose - bool, optional, default True
            If True, the iteration number and error between gene weights in
            adjacent iterations will be displayed.
        projection - str, optional, default 'umap'
            If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
            embedding. Otherwise, no embedding will be generated.
        preprocessing - str, optional, default 'Normalizer'
            If 'Normalizer', use sklearn.preprocessing.Normalizer, which
            normalizes expression data prior to PCA such that each cell has
            unit L2 norm. If 'StandardScaler', use
            sklearn.preprocessing.StandardScaler, which normalizes expression
            data prior to PCA such that each gene has zero mean and unit
            variance. Otherwise, do not normalize the expression data. We
            recommend using 'StandardScaler' for large datasets and
            'Normalizer' otherwise.
        num_norm_avg - int, optional, default 50
            The top 'num_norm_avg' dispersions are averaged to determine the
            normalization factor when calculating the weights. This prevents
            genes with large spatial dispersions from skewing the distribution
            of weights.
        proj_kwargs - dict, optional, default {}
            A dictionary of keyword arguments to pass to the projection
            functions.
        """
        self.distance = distance
        D = self.adata.X
        # Clamp k to [5, 100] and below the number of cells.
        self.k = k
        if(self.k < 5):
            self.k = 5
        elif(self.k > 100):
            self.k = 100
        if(self.k > D.shape[0] - 1):
            self.k = D.shape[0] - 2
        numcells = D.shape[0]
        # Fewer genes and PCs are retained for larger datasets to bound
        # memory use and runtime.
        n_genes = 8000
        if numcells > 3000 and n_genes > 3000:
            n_genes = 3000
        elif numcells > 2000 and n_genes > 4500:
            n_genes = 4500
        elif numcells > 1000 and n_genes > 6000:
            n_genes = 6000
        elif n_genes > 8000:
            n_genes = 8000
        npcs = None
        if npcs is None and numcells > 3000:
            npcs = 150
        elif npcs is None and numcells > 2000:
            npcs = 250
        elif npcs is None and numcells > 1000:
            npcs = 350
        elif npcs is None:
            npcs = 500
        tinit = time.time()
        # Seed the iteration with a random k-NN graph: each cell is linked
        # to itself plus (k - 1) random cells.
        edm = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
        nums = np.arange(edm.shape[1])
        RINDS = np.random.randint(
            0, numcells, (self.k - 1) * numcells).reshape((numcells,
                                                           (self.k - 1)))
        RINDS = np.hstack((nums[:, None], RINDS))
        edm[np.tile(np.arange(RINDS.shape[0])[:, None],
                    (1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
        edm = edm.tocsr()
        print('RUNNING SAM')
        W = self.dispersion_ranking_NN(
            edm, num_norm_avg=1)
        old = np.zeros(W.size)
        new = W
        i = 0
        err = ((new - old)**2).mean()**0.5
        if max_iter < 5:
            max_iter = 5
        nnas = num_norm_avg
        # Keep the graph and weight history from every iteration.
        self.Ns=[edm]
        self.Ws = [W]
        # Alternate between graph construction (weighted PCA + k-NN) and
        # dispersion-based reweighting until the weights converge (RMSE
        # between successive weight vectors below stopping_condition).
        while (i < max_iter and err > stopping_condition):
            conv = err
            if(verbose):
                print('Iteration: ' + str(i) + ', Convergence: ' + str(conv))
            i += 1
            old = new
            W, wPCA_data, EDM, = self.calculate_nnm(
                D, W, n_genes, preprocessing, npcs, numcells, nnas)
            new = W
            err = ((new - old)**2).mean()**0.5
            self.Ns.append(EDM)
            self.Ws.append(W)
        # One final pass with the converged weights.
        W, wPCA_data, EDM, = self.calculate_nnm(
            D, W, n_genes, preprocessing, npcs, numcells, nnas)
        self.Ns.append(EDM)
        all_gene_names = np.array(list(self.adata.var_names))
        indices = np.argsort(-W)
        ranked_genes = all_gene_names[indices]
        self.corr_bin_genes(number_of_features=1000)
        self.adata.uns['ranked_genes'] = ranked_genes
        self.adata.obsm['X_pca'] = wPCA_data
        self.adata.uns['neighbors'] = {}
        self.adata.uns['neighbors']['connectivities'] = EDM
        if(projection == 'tsne'):
            print('Computing the t-SNE embedding...')
            self.run_tsne(**proj_kwargs)
        elif(projection == 'umap'):
            print('Computing the UMAP embedding...')
            self.run_umap(**proj_kwargs)
        elif(projection == 'diff_umap'):
            print('Computing the diffusion UMAP embedding...')
            self.run_diff_umap(**proj_kwargs)
        elapsed = time.time() - tinit
        if verbose:
            print('Elapsed time: ' + str(elapsed) + ' seconds')
    def calculate_nnm(
            self,
            D,
            W,
            n_genes,
            preprocessing,
            npcs,
            numcells,
            num_norm_avg):
        """One SAM iteration: select the top-weighted genes, preprocess the
        expression submatrix, run weighted PCA, build the k-NN graph, and
        recompute the gene weights on that graph.

        Returns (weights, weighted_PCA_coordinates, knn_adjacency).
        """
        if(n_genes is None):
            gkeep = np.arange(W.size)
        else:
            # Keep the n_genes highest-weighted genes, in original order.
            gkeep = np.sort(np.argsort(-W)[:n_genes])
        if preprocessing == 'Normalizer':
            Ds = D[:, gkeep].toarray()
            Ds = Normalizer().fit_transform(Ds)
        elif preprocessing == 'StandardScaler':
            Ds = D[:, gkeep].toarray()
            Ds = StandardScaler(with_mean=True).fit_transform(Ds)
            # Clip extreme z-scores so outlier cells do not dominate PCA.
            Ds[Ds > 5] = 5
            Ds[Ds < -5] = -5
        else:
            Ds = D[:, gkeep].toarray()
        # Scale each gene column by its SAM weight before PCA.
        D_sub = Ds * (W[gkeep])
        if numcells > 500:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='auto')
        else:
            g_weighted, pca = ut.weighted_PCA(D_sub, npcs=min(
                npcs, min(D.shape)), do_weight=True, solver='full')
        if self.distance == 'euclidean':
            # Unit-normalize rows so euclidean distance behaves like an
            # angular metric in PCA space.
            g_weighted = Normalizer().fit_transform(g_weighted)
        self.adata.uns['pca_obj'] = pca
        EDM = self.calc_nnm(g_weighted)
        W = self.dispersion_ranking_NN(
            EDM, num_norm_avg=num_norm_avg)
        self.adata.uns['X_processed'] = D_sub
        return W, g_weighted, EDM
    def calc_nnm(self, g_weighted):
        """Build a sparse binary k-NN adjacency matrix from the weighted
        PCA coordinates.

        Uses approximate nearest neighbors (NN-descent) for datasets with
        more than 8000 cells and exact pairwise distances otherwise.
        """
        numcells = g_weighted.shape[0]
        if g_weighted.shape[0] > 8000:
            nnm, dists = ut.nearest_neighbors(
                g_weighted, n_neighbors=self.k, metric=self.distance)
            # Scatter the neighbor indices into a binary adjacency matrix.
            EDM = sp.coo_matrix((numcells, numcells), dtype='i').tolil()
            EDM[np.tile(np.arange(nnm.shape[0])[:, None],
                        (1, nnm.shape[1])).flatten(), nnm.flatten()] = 1
            EDM = EDM.tocsr()
        else:
            dist = ut.compute_distances(g_weighted, self.distance)
            nnm = ut.dist_to_nn(dist, self.k)
            EDM = sp.csr_matrix(nnm)
        return EDM
def _create_dict(self, exc):
self.pickle_dict = self.__dict__.copy()
if(exc):
for i in range(len(exc)):
try:
del self.pickle_dict[exc[i]]
except NameError:
0
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
"""Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
geneID_groups = self.adata.uns['gene_groups']
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs)
def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:]
def corr_bin_genes(self, number_of_features=None, input_gene=None):
"""A (hacky) method for binning groups of genes correlated along the
SAM manifold.
Parameters
----------
number_of_features - int, optional, default None
The number of genes to bin. Capped at 5000 due to memory
considerations.
input_gene - str, optional, default None
If not None, use this gene as the first seed when growing the
correlation bins.
"""
weights = self.adata.var['spatial_dispersions'].values
all_gene_names = np.array(list(self.adata.var_names))
D_avg = self.adata.layers['X_knn_avg']
idx2 = np.argsort(-weights)[:weights[weights > 0].size]
if(number_of_features is None or number_of_features > idx2.size):
number_of_features = idx2.size
if number_of_features > 1000:
number_of_features = 1000
if(input_gene is not None):
input_gene = np.where(all_gene_names == input_gene)[0]
if(input_gene.size == 0):
print(
"Gene note found in the filtered dataset. Note "
"that genes are case sensitive.")
return
seeds = [np.array([input_gene])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(all_gene_names[seeds[i]])
return geneID_groups[0]
else:
seeds = [np.array([idx2[0]])]
pw_corr = np.corrcoef(
D_avg[:, idx2[:number_of_features]].T.toarray())
for i in range(1, number_of_features):
flag = False
maxd = np.mean(pw_corr[i, :][pw_corr[i, :] > 0])
maxi = 0
for j in range(len(seeds)):
if(pw_corr[np.where(idx2 == seeds[j][0])[0], i]
> maxd):
maxd = pw_corr[np.where(idx2 == seeds[j][0])[0], i]
maxi = j
flag = True
if(not flag):
seeds.append(np.array([idx2[i]]))
else:
seeds[maxi] = np.append(seeds[maxi], idx2[i])
geneID_groups = []
for i in range(len(seeds)):
geneID_groups.append(
all_gene_names[seeds[i]])
self.adata.uns['gene_groups'] = geneID_groups
return geneID_groups
def run_tsne(self, X=None, metric='correlation', **kwargs):
"""Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' is set to 'precomputed' by default,
implying that this function expects a distance matrix by default.
"""
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d
def run_umap(self, X=None, metric=None, **kwargs):
"""Wrapper for umap-learn.
See https://github.com/lmcinnes/umap sklearn for the documentation
and source code.
"""
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d
    def run_diff_umap(self, use_rep='X_pca', metric='euclidean', n_comps=15,
                      method='gauss', **kwargs):
        """
        Experimental -- running UMAP on the diffusion components
        """
        # NOTE(review): 'metric' and '**kwargs' are accepted but never used
        # below (the first neighbors call uses self.distance, the second
        # hard-codes 'euclidean') -- confirm whether that is intended.
        # NOTE(review): scanpy.api is a deprecated namespace in newer
        # scanpy releases -- verify against the installed version.
        import scanpy.api as sc
        # Build a graph on use_rep, compute diffusion components, then
        # rebuild the graph in diffusion space before embedding with UMAP.
        sc.pp.neighbors(self.adata, use_rep=use_rep, n_neighbors=self.k,
                        metric=self.distance, method=method)
        sc.tl.diffmap(self.adata, n_comps=n_comps)
        sc.pp.neighbors(self.adata, use_rep='X_diffmap', n_neighbors=self.k,
                        metric='euclidean', method=method)
        # Preserve any existing SAM UMAP embedding before it is overwritten.
        if 'X_umap' in self.adata.obsm.keys():
            self.adata.obsm['X_umap_sam'] = self.adata.obsm['X_umap']
        sc.tl.umap(self.adata, min_dist=0.1, copy=False)
def knn_avg(self, nnm=None):
if (nnm is None):
nnm = self.adata.uns['neighbors']['connectivities']
D_avg = (nnm / self.k).dot(self.adata.layers['X_disp'])
self.adata.layers['X_knn_avg'] = D_avg
    def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
                edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
        """Display a scatter plot.
        Displays a scatter plot using the SAM projection or another input
        projection with or without annotations.
        Parameters
        ----------
        projection - ndarray of floats, optional, default None
            An N x 2 matrix, where N is the number of data points. If None,
            use an existing SAM projection (default t-SNE). Can take on values
            'umap' or 'tsne' to specify either the SAM UMAP embedding or
            SAM t-SNE embedding.
        c - ndarray or str, optional, default None
            Colors for each cell in the scatter plot. Can be a vector of
            floats or strings for cell annotations. Can also be a key
            for sam.adata.obs (i.e. 'louvain_clusters').
        axes - matplotlib axis, optional, default None
            Plot output to the specified, existing axes. If None, create new
            figure window.
        cmap - string, optional, default 'rainbow'
            The colormap to use for the input color values.
        colorbar - bool, optional default True
            If True, display a colorbar indicating which values / annotations
            correspond to which color in the scatter plot.
        Keyword arguments -
            All other keyword arguments that can be passed into
            matplotlib.pyplot.scatter can be used.
        """
        if (not PLOTTING):
            print("matplotlib not installed!")
        else:
            # Resolve the 2-D coordinates: obsm key, stored embedding, or
            # user-supplied matrix.
            if(isinstance(projection, str)):
                try:
                    dt = self.adata.obsm[projection]
                except KeyError:
                    print('Please create a projection first using run_umap or'
                          'run_tsne')
            elif(projection is None):
                try:
                    dt = self.adata.obsm['X_umap']
                except KeyError:
                    try:
                        dt = self.adata.obsm['X_tsne']
                    except KeyError:
                        print("Please create either a t-SNE or UMAP projection"
                              "first.")
                        return
            else:
                dt = projection
            if(axes is None):
                plt.figure()
                axes = plt.gca()
            if(c is None):
                plt.scatter(dt[:, 0], dt[:, 1], s=s,
                            linewidth=linewidth, edgecolor=edgecolor, **kwargs)
            else:
                # A string 'c' may name an obs column holding per-cell
                # annotations; fall back to treating it as a color spec.
                if isinstance(c, str):
                    try:
                        c = self.adata.obs[c].get_values()
                    except KeyError:
                        0  # do nothing
                if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
                   (isinstance(c, np.ndarray) or isinstance(c, list))):
                    # Categorical string labels: map to integer codes and
                    # label the colorbar ticks with the original strings.
                    i = ut.convert_annotations(c)
                    ui, ai = np.unique(i, return_index=True)
                    cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)
                    if(colorbar):
                        cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                        cbar.ax.set_yticklabels(c[ai])
                else:
                    if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                        # Scalar color spec: a colorbar is meaningless.
                        colorbar = False
                    i = c
                    cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s,
                                       linewidth=linewidth,
                                       edgecolor=edgecolor,
                                       **kwargs)
                    if(colorbar):
                        plt.colorbar(cax, ax=axes)
def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improves
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name)
    def density_clustering(self, X=None, eps=1, metric='euclidean', **kwargs):
        """Cluster cells with DBSCAN, then reassign noise points (-1) to the
        majority cluster among their k nearest clustered neighbors.

        Parameters
        ----------
        X - ndarray, optional, default None
            Coordinates to cluster. If None, the stored UMAP embedding is
            used and the result is written to adata.obs['density_clusters'];
            otherwise the labels are returned.
        eps / metric / **kwargs - forwarded to sklearn.cluster.DBSCAN.
        """
        from sklearn.cluster import DBSCAN
        if X is None:
            X = self.adata.obsm['X_umap']
            save = True
        else:
            save = False
        cl = DBSCAN(eps=eps, metric=metric, **kwargs).fit_predict(X)
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            # For each noise point, vote among its self.k nearest clustered
            # neighbors (euclidean distances in X) and take the majority.
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :self.k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
            cl[idx1] = np.argmax(nnmc, axis=1)
        if save:
            self.adata.obs['density_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def louvain_clustering(self, X=None, res=1, method='modularity'):
        """Runs Louvain clustering using the vtraag implementation. Assumes
        that 'louvain' optional dependency is installed.
        Parameters
        ----------
        X - sparse matrix, optional, default None
            Adjacency/connectivity matrix to cluster. If None, the stored
            neighbor connectivities are used and the labels are written to
            adata.obs['louvain_clusters']; otherwise labels are returned.
        res - float, optional, default 1
            The resolution parameter which tunes the number of clusters Louvain
            finds.
        method - str, optional, default 'modularity'
            Can be 'modularity' or 'significance', which are two different
            optimizing funcitons in the Louvain algorithm.
        """
        if X is None:
            X = self.adata.uns['neighbors']['connectivities']
            save = True
        else:
            if not sp.isspmatrix_csr(X):
                X = sp.csr_matrix(X)
            save = False
        import igraph as ig
        import louvain
        # X.dot(X.T) / k scores shared neighbors between cells; sparse_knn
        # then keeps only the k strongest edges per row.
        adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
        sources, targets = adjacency.nonzero()
        weights = adjacency[sources, targets]
        if isinstance(weights, np.matrix):
            weights = weights.A1
        g = ig.Graph(directed=True)
        g.add_vertices(adjacency.shape[0])
        g.add_edges(list(zip(sources, targets)))
        # Edge weights are best-effort; clustering proceeds unweighted if
        # the assignment fails.
        try:
            g.es['weight'] = weights
        except BaseException:
            pass
        if method == 'significance':
            cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
        else:
            cl = louvain.find_partition(
                g,
                louvain.RBConfigurationVertexPartition,
                resolution_parameter=res)
        if save:
            self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
        else:
            return np.array(cl.membership)
    def kmeans_clustering(self, numc, X=None, npcs=15):
        """Performs k-means clustering.
        Parameters
        ----------
        numc - int
            Number of clusters
        X - ndarray, optional, default None
            Coordinates to cluster. If None, project the processed data
            onto the first 'npcs' stored principal components; the labels
            are then written to adata.obs['kmeans_clusters'] instead of
            being returned.
        npcs - int, optional, default 15
            Number of principal components to use as input for k-means
            clustering.
        """
        from sklearn.cluster import KMeans
        if X is None:
            D_sub = self.adata.uns['X_processed']
            # Center and project onto the stored PCA components.
            X = (D_sub - D_sub.mean(0)).dot(
                self.adata.uns['pca_obj'].components_[:npcs, :].T)
            save = True
        else:
            save = False
        # Rows are L2-normalized before clustering.
        cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
        if save:
            self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
        else:
            return cl
    def leiden_clustering(self, X=None, res = 1):
        """Run Leiden clustering via scanpy.

        Parameters
        ----------
        X - sparse adjacency matrix, optional, default None
            If None, cluster on the neighbor graph already stored in
            `self.adata` (labels go to obs['leiden_clusters']); otherwise
            use X as the adjacency (labels go to obs['leiden_clusters_X']).
        res - float, optional, default 1
            Leiden resolution parameter.
        """
        # NOTE(review): scanpy.api is deprecated in newer scanpy releases.
        import scanpy.api as sc
        if X is None:
            sc.tl.leiden(self.adata, resolution = res,
                         key_added='leiden_clusters')
            # Convert scanpy's string categories to integer categoricals.
            self.adata.obs['leiden_clusters'] = pd.Categorical(self.adata.obs[
                'leiden_clusters'].get_values().astype('int'))
        else:
            sc.tl.leiden(self.adata, resolution = res, adjacency = X,
                         key_added='leiden_clusters_X')
            self.adata.obs['leiden_clusters_X'] = pd.Categorical(self.adata.obs[
                'leiden_clusters_X'].get_values().astype('int'))
    def hdbknn_clustering(self, X=None, k=None, **kwargs):
        """Cluster with HDBSCAN, then reassign noise points (-1) to the
        majority cluster among their k nearest clustered neighbors.

        Parameters
        ----------
        X - ndarray, optional, default None
            Coordinates to cluster. If None, the processed data is
            projected onto the first 15 stored PCs and L2-normalized; the
            labels are written to adata.obs['hdbknn_clusters'] instead of
            being returned.
        k - int, optional, default None
            Neighborhood size for noise reassignment (defaults to 20).
        **kwargs - forwarded to hdbscan.HDBSCAN.
        """
        import hdbscan
        if X is None:
            #X = self.adata.obsm['X_pca']
            D = self.adata.uns['X_processed']
            X = (D-D.mean(0)).dot(self.adata.uns['pca_obj'].components_.T)[:,:15]
            X = Normalizer().fit_transform(X)
            save = True
        else:
            save = False
        if k is None:
            k = 20#self.k
        hdb = hdbscan.HDBSCAN(metric='euclidean', **kwargs)
        cl = hdb.fit_predict(X)
        idx0 = np.where(cl != -1)[0]
        idx1 = np.where(cl == -1)[0]
        if idx1.size > 0 and idx0.size > 0:
            # Majority vote among each noise point's k nearest clustered
            # neighbors (euclidean distances in X).
            xcmap = ut.generate_euclidean_map(X[idx0, :], X[idx1, :])
            knn = np.argsort(xcmap.T, axis=1)[:, :k]
            nnm = np.zeros(xcmap.shape).T
            nnm[np.tile(np.arange(knn.shape[0])[:, None],
                        (1, knn.shape[1])).flatten(),
                knn.flatten()] = 1
            nnmc = np.zeros((nnm.shape[0], cl.max() + 1))
            for i in range(cl.max() + 1):
                nnmc[:, i] = nnm[:, cl[idx0] == i].sum(1)
            cl[idx1] = np.argmax(nnmc, axis=1)
        if save:
            self.adata.obs['hdbknn_clusters'] = pd.Categorical(cl)
        else:
            return cl
def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
"""
Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers
    def identify_marker_genes_ratio(self, labels=None):
        """
        Ranks marker genes for each cluster using a SAM-weighted
        expression-ratio approach (works quite well).
        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels
        all_gene_names = np.array(list(self.adata.var_names))
        markers={}
        # Per-gene total expression over all cells (denominator below).
        s = np.array(self.adata.layers['X_disp'].sum(0)).flatten()
        lblsu=np.unique(lbls)
        for i in lblsu:
            d = np.array(self.adata.layers['X_disp']
                         [lbls == i, :].sum(0)).flatten()
            rat = np.zeros(d.size)
            # Score: (in-cluster sum)^2 / total sum, scaled by SAM weights.
            rat[s > 0] = d[s > 0]**2 / s[s > 0] * \
                self.adata.var['weights'].values[s > 0]
            x = np.argsort(-rat)
            markers[i] = all_gene_names[x[:]]
        self.adata.uns['marker_genes_ratio'] = markers
        return markers
    def identify_marker_genes_corr(self, labels=None, n_genes=4000):
        """
        Ranking marker genes based on their respective magnitudes in the
        correlation dot products with cluster-specific reference expression
        profiles.
        Parameters
        ----------
        labels - numpy.array or str, optional, default None
            Cluster labels to use for marker gene identification. If None,
            assumes that one of SAM's clustering algorithms has been run. Can
            be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
            specify specific cluster labels in adata.obs.
        n_genes - int, optional, default 4000
            By default, computes correlations on the top 4000 SAM-weighted genes.
        """
        if(labels is None):
            try:
                keys = np.array(list(self.adata.obs_keys()))
                lbls = self.adata.obs[ut.search_string(
                    keys, '_clusters')[0][0]].get_values()
            except KeyError:
                print("Please generate cluster labels first or set the "
                      "'labels' keyword argument.")
                return
        elif isinstance(labels, str):
            lbls = self.adata.obs[labels].get_values().flatten()
        else:
            lbls = labels
        w = self.adata.var['weights'].values
        s = StandardScaler()
        # Standardize the top-weighted genes, then rescale by SAM weights.
        idxg = np.argsort(-w)[:n_genes]
        y1 = s.fit_transform(self.adata.layers['X_disp'][:, idxg].A) * w[idxg]
        all_gene_names = np.array(list(self.adata.var_names))[idxg]
        markers = {}
        lblsu = np.unique(lbls)
        for i in lblsu:
            Gcells = np.array(list(self.adata.obs_names[lbls == i]))
            z1 = y1[np.in1d(self.adata.obs_names, Gcells), :]
            # z-score each cell, correlate against the cluster's z-scored
            # mean profile, and average over cells in the cluster.
            m1 = (z1 - z1.mean(1)[:, None]) / z1.std(1)[:, None]
            ref = z1.mean(0)
            ref = (ref - ref.mean()) / ref.std()
            g2 = (m1 * ref).mean(0)
            markers[i] = all_gene_names[np.argsort(-g2)]
        self.adata.uns['marker_genes_corr'] = markers
        return markers
|
atarashansky/self-assembling-manifold | utilities.py | save_figures | python | def save_figures(filename, fig_IDs=None, **kwargs):
import matplotlib.pyplot as plt
if(fig_IDs is not None):
if(type(fig_IDs) is list):
savetype = 'pdf'
else:
savetype = 'png'
else:
savetype = 'pdf'
if(savetype == 'pdf'):
from matplotlib.backends.backend_pdf import PdfPages
if(len(filename.split('.')) == 1):
filename = filename + '.pdf'
else:
filename = '.'.join(filename.split('.')[:-1])+'.pdf'
pdf = PdfPages(filename)
if fig_IDs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
else:
figs = [plt.figure(n) for n in fig_IDs]
for fig in figs:
fig.savefig(pdf, format='pdf', **kwargs)
pdf.close()
elif(savetype == 'png'):
plt.figure(fig_IDs).savefig(filename, **kwargs) | Save figures.
Parameters
----------
filename - str
Name of output file
fig_IDs - int, numpy.array, list, optional, default None
A list of open figure IDs or a figure ID that will be saved to a
pdf/png file respectively.
**kwargs -
Extra keyword arguments passed into 'matplotlib.pyplot.savefig'. | train | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/utilities.py#L62-L107 | null | import numpy as np
import scipy as sp
import os
import errno
from sklearn.decomposition import PCA, TruncatedSVD
import umap.distances as dist
import umap.sparse as sparse
from umap.rp_tree import rptree_leaf_array, make_forest
from umap.nndescent import (
make_nn_descent,
)
INT32_MIN = np.iinfo(np.int32).min + 1
INT32_MAX = np.iinfo(np.int32).max - 1
__version__ = '0.4.4'
def nearest_neighbors(X, n_neighbors=15, seed=0, metric='correlation'):
    """Approximate k-nearest neighbors via umap-learn's NN-descent.

    Parameters
    ----------
    X - ndarray, shape (n_samples, n_features)
    n_neighbors - int, optional, default 15
    seed - int, optional, default 0
        Seed for the RNG driving the random-projection forest.
    metric - str, optional, default 'correlation'
        Any metric named in umap.distances.named_distances.

    Returns (knn_indices, knn_dists), each shaped
    (n_samples, n_neighbors).
    """
    distance_func = dist.named_distances[metric]
    # Angular random projections for cosine-like metrics.
    if metric in ("cosine", "correlation", "dice", "jaccard"):
        angular = True
    else:
        angular = False
    random_state = np.random.RandomState(seed=seed)
    rng_state = random_state.randint(INT32_MIN, INT32_MAX, 3).astype(np.int64)
    metric_nn_descent = make_nn_descent(
        distance_func, tuple({}.values())
    )
    # Forest size and iteration heuristics follow umap-learn's defaults.
    n_trees = 5 + int(round((X.shape[0]) ** 0.5 / 20.0))
    n_iters = max(5, int(round(np.log2(X.shape[0]))))
    rp_forest = make_forest(X, n_neighbors, n_trees, rng_state, angular)
    leaf_array = rptree_leaf_array(rp_forest)
    knn_indices, knn_dists = metric_nn_descent(
        X,
        n_neighbors,
        rng_state,
        max_candidates=60,
        rp_tree_init=True,
        leaf_array=leaf_array,
        n_iters=n_iters,
        verbose=False,
    )
    return knn_indices, knn_dists
def knndist(nnma):
    """Convert a binary nearest-neighbor adjacency matrix into
    (indices, distances) arrays.

    Each output row lists the column indices where the corresponding row
    of `nnma` equals 1; distances are uniformly 1 because the adjacency
    carries no magnitudes. Every row of `nnma` must contain the same
    number of ones (np.vstack requires equal lengths).
    """
    rows = [np.where(nnma[i, :] == 1)[0] for i in range(nnma.shape[0])]
    knn = np.vstack(rows)
    return knn, np.ones(knn.shape)
def save_figures(filename, fig_IDs=None, **kwargs):
    """
    Save figures.
    Parameters
    ----------
    filename - str
        Name of output file
    fig_IDs - int, numpy.array, list, optional, default None
        A list of open figure IDs or a figure ID that will be saved to a
        pdf/png file respectively.
    **kwargs -
        Extra keyword arguments passed into 'matplotlib.pyplot.savefig'.
    """
    import matplotlib.pyplot as plt
    # A single figure ID yields one PNG; a list of IDs (or None, meaning
    # all open figures) yields a multi-page PDF.
    if fig_IDs is not None and type(fig_IDs) is not list:
        savetype = 'png'
    else:
        savetype = 'pdf'
    if savetype == 'pdf':
        from matplotlib.backends.backend_pdf import PdfPages
        # Normalize the output name to a .pdf extension.
        parts = filename.split('.')
        if len(parts) == 1:
            filename = filename + '.pdf'
        else:
            filename = '.'.join(parts[:-1]) + '.pdf'
        pdf = PdfPages(filename)
        ids = plt.get_fignums() if fig_IDs is None else fig_IDs
        for num in ids:
            plt.figure(num).savefig(pdf, format='pdf', **kwargs)
        pdf.close()
    else:
        plt.figure(fig_IDs).savefig(filename, **kwargs)
def weighted_PCA(mat, do_weight=True, npcs=None, solver='auto'):
    """PCA whose components are optionally rescaled by the square root of
    their normalized variances ('weighted' PCA).

    Parameters
    ----------
    mat - ndarray, shape (n_samples, n_features)
    do_weight - bool, optional, default True
        If True, multiply each component by sqrt(var / max var).
    npcs - int, optional, default None
        Number of components; None keeps min(mat.shape).
    solver - str, optional, default 'auto'
        Passed to sklearn.decomposition.PCA as svd_solver.

    Returns (reduced_weighted, fitted_PCA_object).
    """
    #mat = (mat - np.mean(mat, axis=0))
    if(do_weight):
        if(min(mat.shape) >= 10000 and npcs is None):
            print(
                "More than 10,000 cells. Running with 'npcs' set to < 1000 is"
                " recommended.")
        if(npcs is None):
            ncom = min(mat.shape)
        else:
            ncom = min((min(mat.shape), npcs))
        pca = PCA(svd_solver=solver, n_components=ncom)
        reduced = pca.fit_transform(mat)
        # Weight each component by the square root of its normalized
        # variance so low-variance components contribute less.
        scaled_eigenvalues = reduced.var(0)
        scaled_eigenvalues = scaled_eigenvalues / scaled_eigenvalues.max()
        reduced_weighted = reduced * scaled_eigenvalues[None, :]**0.5
    else:
        pca = PCA(n_components=npcs, svd_solver=solver)
        reduced = pca.fit_transform(mat)
        if reduced.shape[1] == 1:
            # Degenerate 1-D output; refit with two components.
            pca = PCA(n_components=2, svd_solver=solver)
            reduced = pca.fit_transform(mat)
        reduced_weighted = reduced
    return reduced_weighted, pca
def weighted_sparse_PCA(mat, do_weight=True, npcs=None):
    """Dimensionality reduction via TruncatedSVD (sparse-input friendly),
    optionally weighting each component by its normalized variance.

    Parameters
    ----------
    mat - array or sparse matrix, shape (n_samples, n_features)
    do_weight - bool, default True
        If True, scale each component by sqrt(var / max_var).
    npcs - int, optional
        Number of components; defaults to min(mat.shape) when weighting.

    Returns
    -------
    (reduced_weighted, fitted TruncatedSVD object)
    """
    if do_weight:
        if min(mat.shape) >= 10000 and npcs is None:
            print(
                "More than 10,000 cells. Running with 'npcs' set to < 1000 is"
                " recommended.")
        ncom = min(mat.shape) if npcs is None else min(min(mat.shape), npcs)
        pca = TruncatedSVD(n_components=ncom)
        reduced = pca.fit_transform(mat)
        scaled_eigenvalues = reduced.var(0)
        scaled_eigenvalues = scaled_eigenvalues / scaled_eigenvalues.max()
        reduced_weighted = reduced * scaled_eigenvalues[None, :]**0.5
    else:
        # BUG FIX: sklearn's TruncatedSVD takes 'algorithm', not 'svd_solver'
        # (a PCA-only argument); the old call raised TypeError on this path.
        pca = TruncatedSVD(n_components=npcs)
        reduced = pca.fit_transform(mat)
        if reduced.shape[1] == 1:
            # Guarantee at least two output dimensions.
            pca = TruncatedSVD(n_components=2)
            reduced = pca.fit_transform(mat)
        reduced_weighted = reduced
    return reduced_weighted, pca
def transform_wPCA(mat, pca):
    """Project ``mat`` into an already-fitted weighted PCA space.

    ``pca`` must expose ``mean_``, ``components_`` and ``explained_variance_``
    (e.g. a fitted sklearn PCA object). Components are weighted by the square
    root of their variance relative to the largest component's variance.
    """
    centered = mat - pca.mean_
    projected = centered.dot(pca.components_.T)
    variances = pca.explained_variance_
    weights = (variances / variances.max()) ** 0.5
    return np.array(projected) * weights[None, :]
def search_string(vec, s):
    """Case-insensitive substring search over the entries of ``vec``.

    Returns ``(matching_entries, matching_indices)`` when at least one entry
    contains ``s``; otherwise the historical sentinel ``[-1, -1]``. ``vec``
    must support numpy fancy indexing (e.g. a numpy string array).
    """
    needle = s.lower()
    hits = [idx for idx, entry in enumerate(vec) if needle in entry.lower()]
    if hits:
        hit_arr = np.array(hits)
        return vec[hit_arr], hit_arr
    # NOTE: inconsistent return type on "no match", kept for compatibility.
    return [-1, -1]
def distance_matrix_error(dist1, dist2):
    """Average (1 - Pearson correlation) between corresponding rows of two
    distance matrices; 0 means the row profiles agree perfectly."""
    n_rows = dist1.shape[0]
    total_corr = sum(np.corrcoef(dist1[i], dist2[i])[0, 1]
                     for i in range(n_rows))
    return 1 - total_corr / n_rows
def generate_euclidean_map(A, B):
    """Pairwise Euclidean distances between the rows of A and the rows of B."""
    sq_a = (A**2).sum(1).flatten()
    sq_b = (B**2).sum(1).flatten()
    # ||a-b||^2 = ||a||^2 + ||b||^2 - 2 a.b; clamp tiny negatives caused
    # by floating-point round-off before taking the square root.
    squared = sq_a[:, None] + sq_b[None, :] - 2 * A.dot(B.T)
    squared[squared < 0] = 0
    return np.sqrt(squared)
def generate_correlation_map(x, y):
    """Pairwise Pearson correlations between the rows of x and the rows of y.

    Raises ValueError if the row lengths differ.
    """
    n = x.shape[1]
    if y.shape[1] != n:
        raise ValueError('x and y must ' +
                         'have the same number of timepoints.')
    mu_x = x.mean(1)
    mu_y = y.mean(1)
    # ddof = n - 1 makes std an UNnormalized root-sum-of-squares, which
    # cancels exactly against the unnormalized covariance below.
    s_x = x.std(1, ddof=n - 1)
    s_y = y.std(1, ddof=n - 1)
    cov = x.dot(y.T) - n * np.outer(mu_x, mu_y)
    return cov / np.outer(s_x, s_y)
def extract_annotation(cn, x, c='_'):
    """Split every string in ``cn`` on separator ``c`` and return field ``x``
    from each, as a numpy array."""
    return np.array([entry.split(c)[x] for entry in cn])
def isolate(dt, x1, x2, y1, y2):
    """Indices of 2-D points in ``dt`` lying strictly inside the open
    rectangle (x1, x2) x (y1, y2)."""
    in_x = (dt[:, 0] > x1) & (dt[:, 0] < x2)
    in_y = (dt[:, 1] > y1) & (dt[:, 1] < y2)
    return np.where(in_x & in_y)[0]
def to_lower(y):
    """Return a flattened copy of string array ``y`` with every entry
    lowercased (original array is left untouched)."""
    flat = y.copy().flatten()
    for idx, value in enumerate(flat):
        flat[idx] = value.lower()
    return flat
def to_upper(y):
    """Return a flattened copy of string array ``y`` with every entry
    uppercased (original array is left untouched)."""
    flat = y.copy().flatten()
    for idx, value in enumerate(flat):
        flat[idx] = value.upper()
    return flat
def create_folder(path):
    """Create directory ``path`` (including parents); a no-op if it already
    exists as a directory.

    Raises OSError for any other failure (e.g. permission denied, or the
    path exists but is a regular file).
    """
    # exist_ok=True replaces the manual errno.EEXIST check. Unlike the old
    # check, it still raises when ``path`` exists as a non-directory, which
    # surfaces that error immediately instead of on a later write.
    os.makedirs(path, exist_ok=True)
def convert_annotations(A):
    """Encode the labels in ``A`` as integers 0..k-1, assigned in sorted
    label order, and return a flat integer array of length ``A.size``."""
    # np.unique sorts the distinct labels; return_inverse yields, per
    # element, the rank of its label in that sorted order -- exactly the
    # code the original loop assigned. ravel() keeps the output 1-D.
    _, codes = np.unique(np.asarray(A).ravel(), return_inverse=True)
    return codes.astype('int')
def compute_distances(A, dm):
    """Pairwise distance matrix between the rows of ``A``.

    ``dm``: 'euclidean' and 'correlation' use fast closed forms; any other
    value is forwarded to scipy.spatial.distance.pdist as the metric name.
    """
    if dm == 'euclidean':
        gram = A.dot(A.T)
        sq_norms = np.diag(gram)
        squared = sq_norms[:, None] + sq_norms[None, :] - 2 * gram
        # Clamp tiny negatives from floating-point round-off.
        squared[squared < 0] = 0
        return np.sqrt(squared)
    if dm == 'correlation':
        return 1 - np.corrcoef(A)
    condensed = sp.spatial.distance.pdist(A, metric=dm)
    return sp.spatial.distance.squareform(condensed)
def dist_to_nn(d, K):
    """Binary K-nearest-neighbor adjacency matrix from distance matrix ``d``.

    The diagonal is set to -1 before ranking, so every point counts ITSELF
    among its K nearest neighbors (this matches the original behavior).
    Returns a 0/1 matrix with exactly K ones per row (for K <= n).
    """
    E = d.copy()
    np.fill_diagonal(E, -1)
    # Sentinel larger than any remaining entry, used to mark neighbors.
    marker = np.max(E) * 2
    nearest = np.argsort(E, axis=1)[:, :K]
    row_ids = np.repeat(np.arange(E.shape[0]), nearest.shape[1])
    E[row_ids, nearest.flatten()] = marker
    E[E < marker] = 0
    E[E > 0] = 1
    return E
|
cox-labs/perseuspy | perseuspy/io/perseus/matrix.py | read_annotations | python | def read_annotations(path_or_file, separator='\t', reset=True):
annotations = OrderedDict({})
with PathOrFile(path_or_file, 'r', reset=reset) as f:
annotations['Column Name'] = f.readline().strip().split(separator)
for line in f:
if line.startswith('#!{'):
tokens = line.strip().split(separator)
_name, first_value = tokens[0].split('}')
name = _name.replace('#!{', '')
values = [first_value] + tokens[1:]
if name == 'Type':
colnames = annotations['Column Name']
annotations['dtype'] = {colnames[i]: perseus_to_dtype[x] for i, x in enumerate(values) if x in perseus_to_dtype}
annotations['converters'] = {colnames[i]: converters[x] for i, x in enumerate(values) if x in converters}
annotations[name] = values
return annotations | Read all annotations from the specified file.
>>> annotations = read_annotations(path_or_file, separator)
>>> colnames = annotations['Column Name']
>>> types = annotations['Type']
>>> annot_row = annotations['Annot. row name']
:param path_or_file: Path or file-like object
:param separator: Column separator
:param reset: Reset the file after reading. Useful for file-like, no-op for paths.
:returns: Ordered dictionary of annotations. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/perseus/matrix.py#L22-L50 | null | import numpy as np
import pandas as pd
from collections import OrderedDict
separator = '\t'
def multi_numeric_converter(numbers):
return [float(num) for num in numbers.split(';') if num != '']
converters = {'M': multi_numeric_converter}
perseus_to_dtype = {'E' : float, 'T' : str, 'C' : 'category', 'N' : float}
def dtype_to_perseus(dtype):
if type(dtype) is pd.core.dtypes.dtypes.CategoricalDtype:
return 'C'
else:
mapping = {np.dtype('float'): 'N', np.dtype('str'): 'T',
np.dtype('object'): 'T', np.dtype('int64'): 'N',
np.dtype('bool'): 'C'}
return mapping[dtype]
def read_annotations(path_or_file, separator='\t', reset=True):
"""
Read all annotations from the specified file.
>>> annotations = read_annotations(path_or_file, separator)
>>> colnames = annotations['Column Name']
>>> types = annotations['Type']
>>> annot_row = annotations['Annot. row name']
:param path_or_file: Path or file-like object
:param separator: Column separator
:param reset: Reset the file after reading. Useful for file-like, no-op for paths.
:returns: Ordered dictionary of annotations.
"""
annotations = OrderedDict({})
with PathOrFile(path_or_file, 'r', reset=reset) as f:
annotations['Column Name'] = f.readline().strip().split(separator)
for line in f:
if line.startswith('#!{'):
tokens = line.strip().split(separator)
_name, first_value = tokens[0].split('}')
name = _name.replace('#!{', '')
values = [first_value] + tokens[1:]
if name == 'Type':
colnames = annotations['Column Name']
annotations['dtype'] = {colnames[i]: perseus_to_dtype[x] for i, x in enumerate(values) if x in perseus_to_dtype}
annotations['converters'] = {colnames[i]: converters[x] for i, x in enumerate(values) if x in converters}
annotations[name] = values
return annotations
def annotation_rows(prefix, annotations):
"""
Helper function to extract N: and C: rows from annotations and pad their values
"""
ncol = len(annotations['Column Name'])
return {name.replace(prefix, '', 1) : values + [''] * (ncol - len(values))
for name, values in annotations.items() if name.startswith(prefix)}
def create_column_index(annotations):
"""
Create a pd.MultiIndex using the column names and any categorical rows.
Note that also non-main columns will be assigned a default category ''.
"""
_column_index = OrderedDict({'Column Name' : annotations['Column Name']})
categorical_rows = annotation_rows('C:', annotations)
_column_index.update(categorical_rows)
numerical_rows = {name: [float(x) if x != '' else float('NaN') for x in values]
for name, values in annotation_rows('N:', annotations).items()} # to floats
_column_index.update(numerical_rows)
column_index = pd.MultiIndex.from_tuples(list(zip(*_column_index.values())), names=list(_column_index.keys()))
if len(column_index.names) == 1:
# flatten single-level index
name = column_index.names[0]
column_index = column_index.get_level_values(name)
return column_index
def read_perseus(path_or_file, **kwargs):
"""
Read a Perseus-formatted matrix into a pd.DataFrame.
Annotation rows will be converted into a multi-index.
By monkey-patching the returned pd.DataFrame a `to_perseus`
method for exporting the pd.DataFrame is made available.
:param path_or_file: File path or file-like object
:param kwargs: Keyword arguments passed as-is to pandas.read_csv
:returns: The parsed data frame
"""
annotations = read_annotations(path_or_file, separator)
column_index = create_column_index(annotations)
if 'usecols' in kwargs:
usecols = kwargs['usecols']
if type(usecols[0]) is str:
usecols = sorted([list(column_index).index(x) for x in usecols])
column_index = column_index[usecols]
kwargs['dtype'] = dict(kwargs.get('dtype', {}), **annotations.get('dtype', {}))
kwargs['converters'] = dict(kwargs.get('converters', {}), **annotations.get('converters', {}))
df = pd.read_csv(path_or_file, sep=separator, comment='#', **kwargs)
df.columns = column_index
return df
import numpy as np
def to_perseus(df, path_or_file, main_columns=None,
separator=separator,
convert_bool_to_category=True,
numerical_annotation_rows = set([])):
"""
Save pd.DataFrame to Perseus text format.
:param df: pd.DataFrame.
:param path_or_file: File name or file-like object.
    :param main_columns: Main columns. Will be inferred if set to None. All numeric columns up until the first non-numeric column are considered main columns.
:param separator: For separating fields, default='\t'.
    :param convert_bool_to_category: Convert bool columns of True/False to category columns '+'/'', default=True.
:param numerical_annotation_rows: Set of column names to be interpreted as numerical annotation rows, default=set([]).
"""
_df = df.copy()
if not _df.columns.name:
_df.columns.name = 'Column Name'
column_names = _df.columns.get_level_values('Column Name')
annotations = {}
main_columns = _infer_main_columns(_df) if main_columns is None else main_columns
annotations['Type'] = ['E' if column_names[i] in main_columns else dtype_to_perseus(dtype)
for i, dtype in enumerate(_df.dtypes)]
# detect multi-numeric columns
for i, column in enumerate(_df.columns):
valid_values = [value for value in _df[column] if value is not None]
if len(valid_values) > 0 and all(type(value) is list for value in valid_values):
annotations['Type'][i] = 'M'
_df[column] = _df[column].apply(lambda xs: ';'.join(str(x) for x in xs))
if convert_bool_to_category:
for i, column in enumerate(_df.columns):
if _df.dtypes[i] is np.dtype('bool'):
values = _df[column].values
_df[column][values] = '+'
_df[column][~values] = ''
annotation_row_names = set(_df.columns.names) - {'Column Name'}
for name in annotation_row_names:
annotation_type = 'N' if name in numerical_annotation_rows else 'C'
annotations['{}:{}'.format(annotation_type, name)] = _df.columns.get_level_values(name)
with PathOrFile(path_or_file, 'w') as f:
f.write(separator.join(column_names) + '\n')
for name, values in annotations.items():
f.write('#!{{{name}}}{values}\n'.format(name=name, values=separator.join([str(x) for x in values])))
_df.to_csv(f, header=None, index=False, sep=separator)
class PathOrFile():
"""Small context manager for file paths or file-like objects
:param path_or_file: Path to a file or file-like object
:param mode: Set reading/writing mode
:param reset: Reset file-like to initial position. Has no effect on path."""
def __init__(self, path_or_file, mode = None, reset=False):
self.path_or_file = path_or_file
self.mode = mode
self.isPath = isinstance(path_or_file, str)
self.reset = reset and not self.isPath
if self.reset:
self.position = self.path_or_file.seek(0, 1)
def __enter__(self):
if self.isPath:
self.open_file = open(self.path_or_file, self.mode)
return self.open_file
else:
self.open_file = None
return self.path_or_file
def __exit__(self, *args):
if self.open_file:
self.open_file.close()
if self.reset:
self.path_or_file.seek(self.position)
_numeric_dtypes = {np.dtype('float32'), np.dtype('float64'), np.dtype('int32'), np.dtype('int64')}
def _infer_main_columns(df, index_level='Column Name', numeric_dtypes=_numeric_dtypes):
"""
All numeric columns up-until the first non-numeric column are considered main columns.
:param df: The pd.DataFrame
:param index_level: Name of the index level of the column names. Default 'Column Name'
:param numeric_dtypes: Set of numpy.dtype containing all numeric types. Default int/float.
    :returns: The names of the inferred main columns
"""
columns = df.columns.get_level_values(index_level)
main_columns = []
for i,dtype in enumerate(df.dtypes):
if dtype in numeric_dtypes:
main_columns.append(columns[i])
else:
break
return main_columns
|
cox-labs/perseuspy | perseuspy/io/perseus/matrix.py | annotation_rows | python | def annotation_rows(prefix, annotations):
ncol = len(annotations['Column Name'])
return {name.replace(prefix, '', 1) : values + [''] * (ncol - len(values))
for name, values in annotations.items() if name.startswith(prefix)} | Helper function to extract N: and C: rows from annotations and pad their values | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/perseus/matrix.py#L52-L58 | null | import numpy as np
import pandas as pd
from collections import OrderedDict
separator = '\t'
def multi_numeric_converter(numbers):
return [float(num) for num in numbers.split(';') if num != '']
converters = {'M': multi_numeric_converter}
perseus_to_dtype = {'E' : float, 'T' : str, 'C' : 'category', 'N' : float}
def dtype_to_perseus(dtype):
if type(dtype) is pd.core.dtypes.dtypes.CategoricalDtype:
return 'C'
else:
mapping = {np.dtype('float'): 'N', np.dtype('str'): 'T',
np.dtype('object'): 'T', np.dtype('int64'): 'N',
np.dtype('bool'): 'C'}
return mapping[dtype]
def read_annotations(path_or_file, separator='\t', reset=True):
"""
Read all annotations from the specified file.
>>> annotations = read_annotations(path_or_file, separator)
>>> colnames = annotations['Column Name']
>>> types = annotations['Type']
>>> annot_row = annotations['Annot. row name']
:param path_or_file: Path or file-like object
:param separator: Column separator
:param reset: Reset the file after reading. Useful for file-like, no-op for paths.
:returns: Ordered dictionary of annotations.
"""
annotations = OrderedDict({})
with PathOrFile(path_or_file, 'r', reset=reset) as f:
annotations['Column Name'] = f.readline().strip().split(separator)
for line in f:
if line.startswith('#!{'):
tokens = line.strip().split(separator)
_name, first_value = tokens[0].split('}')
name = _name.replace('#!{', '')
values = [first_value] + tokens[1:]
if name == 'Type':
colnames = annotations['Column Name']
annotations['dtype'] = {colnames[i]: perseus_to_dtype[x] for i, x in enumerate(values) if x in perseus_to_dtype}
annotations['converters'] = {colnames[i]: converters[x] for i, x in enumerate(values) if x in converters}
annotations[name] = values
return annotations
def create_column_index(annotations):
"""
Create a pd.MultiIndex using the column names and any categorical rows.
Note that also non-main columns will be assigned a default category ''.
"""
_column_index = OrderedDict({'Column Name' : annotations['Column Name']})
categorical_rows = annotation_rows('C:', annotations)
_column_index.update(categorical_rows)
numerical_rows = {name: [float(x) if x != '' else float('NaN') for x in values]
for name, values in annotation_rows('N:', annotations).items()} # to floats
_column_index.update(numerical_rows)
column_index = pd.MultiIndex.from_tuples(list(zip(*_column_index.values())), names=list(_column_index.keys()))
if len(column_index.names) == 1:
# flatten single-level index
name = column_index.names[0]
column_index = column_index.get_level_values(name)
return column_index
def read_perseus(path_or_file, **kwargs):
"""
Read a Perseus-formatted matrix into a pd.DataFrame.
Annotation rows will be converted into a multi-index.
By monkey-patching the returned pd.DataFrame a `to_perseus`
method for exporting the pd.DataFrame is made available.
:param path_or_file: File path or file-like object
:param kwargs: Keyword arguments passed as-is to pandas.read_csv
:returns: The parsed data frame
"""
annotations = read_annotations(path_or_file, separator)
column_index = create_column_index(annotations)
if 'usecols' in kwargs:
usecols = kwargs['usecols']
if type(usecols[0]) is str:
usecols = sorted([list(column_index).index(x) for x in usecols])
column_index = column_index[usecols]
kwargs['dtype'] = dict(kwargs.get('dtype', {}), **annotations.get('dtype', {}))
kwargs['converters'] = dict(kwargs.get('converters', {}), **annotations.get('converters', {}))
df = pd.read_csv(path_or_file, sep=separator, comment='#', **kwargs)
df.columns = column_index
return df
import numpy as np
def to_perseus(df, path_or_file, main_columns=None,
separator=separator,
convert_bool_to_category=True,
numerical_annotation_rows = set([])):
"""
Save pd.DataFrame to Perseus text format.
:param df: pd.DataFrame.
:param path_or_file: File name or file-like object.
:param main_columns: Main columns. Will be infered if set to None. All numeric columns up-until the first non-numeric column are considered main columns.
:param separator: For separating fields, default='\t'.
    :param convert_bool_to_category: Convert bool columns of True/False to category columns '+'/'', default=True.
:param numerical_annotation_rows: Set of column names to be interpreted as numerical annotation rows, default=set([]).
"""
_df = df.copy()
if not _df.columns.name:
_df.columns.name = 'Column Name'
column_names = _df.columns.get_level_values('Column Name')
annotations = {}
main_columns = _infer_main_columns(_df) if main_columns is None else main_columns
annotations['Type'] = ['E' if column_names[i] in main_columns else dtype_to_perseus(dtype)
for i, dtype in enumerate(_df.dtypes)]
# detect multi-numeric columns
for i, column in enumerate(_df.columns):
valid_values = [value for value in _df[column] if value is not None]
if len(valid_values) > 0 and all(type(value) is list for value in valid_values):
annotations['Type'][i] = 'M'
_df[column] = _df[column].apply(lambda xs: ';'.join(str(x) for x in xs))
if convert_bool_to_category:
for i, column in enumerate(_df.columns):
if _df.dtypes[i] is np.dtype('bool'):
values = _df[column].values
_df[column][values] = '+'
_df[column][~values] = ''
annotation_row_names = set(_df.columns.names) - {'Column Name'}
for name in annotation_row_names:
annotation_type = 'N' if name in numerical_annotation_rows else 'C'
annotations['{}:{}'.format(annotation_type, name)] = _df.columns.get_level_values(name)
with PathOrFile(path_or_file, 'w') as f:
f.write(separator.join(column_names) + '\n')
for name, values in annotations.items():
f.write('#!{{{name}}}{values}\n'.format(name=name, values=separator.join([str(x) for x in values])))
_df.to_csv(f, header=None, index=False, sep=separator)
class PathOrFile():
"""Small context manager for file paths or file-like objects
:param path_or_file: Path to a file or file-like object
:param mode: Set reading/writing mode
:param reset: Reset file-like to initial position. Has no effect on path."""
def __init__(self, path_or_file, mode = None, reset=False):
self.path_or_file = path_or_file
self.mode = mode
self.isPath = isinstance(path_or_file, str)
self.reset = reset and not self.isPath
if self.reset:
self.position = self.path_or_file.seek(0, 1)
def __enter__(self):
if self.isPath:
self.open_file = open(self.path_or_file, self.mode)
return self.open_file
else:
self.open_file = None
return self.path_or_file
def __exit__(self, *args):
if self.open_file:
self.open_file.close()
if self.reset:
self.path_or_file.seek(self.position)
_numeric_dtypes = {np.dtype('float32'), np.dtype('float64'), np.dtype('int32'), np.dtype('int64')}
def _infer_main_columns(df, index_level='Column Name', numeric_dtypes=_numeric_dtypes):
"""
All numeric columns up-until the first non-numeric column are considered main columns.
:param df: The pd.DataFrame
:param index_level: Name of the index level of the column names. Default 'Column Name'
:param numeric_dtypes: Set of numpy.dtype containing all numeric types. Default int/float.
:returns: The names of the infered main columns
"""
columns = df.columns.get_level_values(index_level)
main_columns = []
for i,dtype in enumerate(df.dtypes):
if dtype in numeric_dtypes:
main_columns.append(columns[i])
else:
break
return main_columns
|
cox-labs/perseuspy | perseuspy/io/perseus/matrix.py | create_column_index | python | def create_column_index(annotations):
_column_index = OrderedDict({'Column Name' : annotations['Column Name']})
categorical_rows = annotation_rows('C:', annotations)
_column_index.update(categorical_rows)
numerical_rows = {name: [float(x) if x != '' else float('NaN') for x in values]
for name, values in annotation_rows('N:', annotations).items()} # to floats
_column_index.update(numerical_rows)
column_index = pd.MultiIndex.from_tuples(list(zip(*_column_index.values())), names=list(_column_index.keys()))
if len(column_index.names) == 1:
# flatten single-level index
name = column_index.names[0]
column_index = column_index.get_level_values(name)
return column_index | Create a pd.MultiIndex using the column names and any categorical rows.
Note that also non-main columns will be assigned a default category ''. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/perseus/matrix.py#L61-L77 | [
"def annotation_rows(prefix, annotations):\n \"\"\"\n Helper function to extract N: and C: rows from annotations and pad their values\n \"\"\"\n ncol = len(annotations['Column Name'])\n return {name.replace(prefix, '', 1) : values + [''] * (ncol - len(values))\n for name, values in annotat... | import numpy as np
import pandas as pd
from collections import OrderedDict
separator = '\t'
def multi_numeric_converter(numbers):
return [float(num) for num in numbers.split(';') if num != '']
converters = {'M': multi_numeric_converter}
perseus_to_dtype = {'E' : float, 'T' : str, 'C' : 'category', 'N' : float}
def dtype_to_perseus(dtype):
if type(dtype) is pd.core.dtypes.dtypes.CategoricalDtype:
return 'C'
else:
mapping = {np.dtype('float'): 'N', np.dtype('str'): 'T',
np.dtype('object'): 'T', np.dtype('int64'): 'N',
np.dtype('bool'): 'C'}
return mapping[dtype]
def read_annotations(path_or_file, separator='\t', reset=True):
"""
Read all annotations from the specified file.
>>> annotations = read_annotations(path_or_file, separator)
>>> colnames = annotations['Column Name']
>>> types = annotations['Type']
>>> annot_row = annotations['Annot. row name']
:param path_or_file: Path or file-like object
:param separator: Column separator
:param reset: Reset the file after reading. Useful for file-like, no-op for paths.
:returns: Ordered dictionary of annotations.
"""
annotations = OrderedDict({})
with PathOrFile(path_or_file, 'r', reset=reset) as f:
annotations['Column Name'] = f.readline().strip().split(separator)
for line in f:
if line.startswith('#!{'):
tokens = line.strip().split(separator)
_name, first_value = tokens[0].split('}')
name = _name.replace('#!{', '')
values = [first_value] + tokens[1:]
if name == 'Type':
colnames = annotations['Column Name']
annotations['dtype'] = {colnames[i]: perseus_to_dtype[x] for i, x in enumerate(values) if x in perseus_to_dtype}
annotations['converters'] = {colnames[i]: converters[x] for i, x in enumerate(values) if x in converters}
annotations[name] = values
return annotations
def annotation_rows(prefix, annotations):
"""
Helper function to extract N: and C: rows from annotations and pad their values
"""
ncol = len(annotations['Column Name'])
return {name.replace(prefix, '', 1) : values + [''] * (ncol - len(values))
for name, values in annotations.items() if name.startswith(prefix)}
def read_perseus(path_or_file, **kwargs):
"""
Read a Perseus-formatted matrix into a pd.DataFrame.
Annotation rows will be converted into a multi-index.
By monkey-patching the returned pd.DataFrame a `to_perseus`
method for exporting the pd.DataFrame is made available.
:param path_or_file: File path or file-like object
:param kwargs: Keyword arguments passed as-is to pandas.read_csv
:returns: The parsed data frame
"""
annotations = read_annotations(path_or_file, separator)
column_index = create_column_index(annotations)
if 'usecols' in kwargs:
usecols = kwargs['usecols']
if type(usecols[0]) is str:
usecols = sorted([list(column_index).index(x) for x in usecols])
column_index = column_index[usecols]
kwargs['dtype'] = dict(kwargs.get('dtype', {}), **annotations.get('dtype', {}))
kwargs['converters'] = dict(kwargs.get('converters', {}), **annotations.get('converters', {}))
df = pd.read_csv(path_or_file, sep=separator, comment='#', **kwargs)
df.columns = column_index
return df
import numpy as np
def to_perseus(df, path_or_file, main_columns=None,
separator=separator,
convert_bool_to_category=True,
numerical_annotation_rows = set([])):
"""
Save pd.DataFrame to Perseus text format.
:param df: pd.DataFrame.
:param path_or_file: File name or file-like object.
:param main_columns: Main columns. Will be infered if set to None. All numeric columns up-until the first non-numeric column are considered main columns.
:param separator: For separating fields, default='\t'.
    :param convert_bool_to_category: Convert bool columns of True/False to category columns '+'/'', default=True.
:param numerical_annotation_rows: Set of column names to be interpreted as numerical annotation rows, default=set([]).
"""
_df = df.copy()
if not _df.columns.name:
_df.columns.name = 'Column Name'
column_names = _df.columns.get_level_values('Column Name')
annotations = {}
main_columns = _infer_main_columns(_df) if main_columns is None else main_columns
annotations['Type'] = ['E' if column_names[i] in main_columns else dtype_to_perseus(dtype)
for i, dtype in enumerate(_df.dtypes)]
# detect multi-numeric columns
for i, column in enumerate(_df.columns):
valid_values = [value for value in _df[column] if value is not None]
if len(valid_values) > 0 and all(type(value) is list for value in valid_values):
annotations['Type'][i] = 'M'
_df[column] = _df[column].apply(lambda xs: ';'.join(str(x) for x in xs))
if convert_bool_to_category:
for i, column in enumerate(_df.columns):
if _df.dtypes[i] is np.dtype('bool'):
values = _df[column].values
_df[column][values] = '+'
_df[column][~values] = ''
annotation_row_names = set(_df.columns.names) - {'Column Name'}
for name in annotation_row_names:
annotation_type = 'N' if name in numerical_annotation_rows else 'C'
annotations['{}:{}'.format(annotation_type, name)] = _df.columns.get_level_values(name)
with PathOrFile(path_or_file, 'w') as f:
f.write(separator.join(column_names) + '\n')
for name, values in annotations.items():
f.write('#!{{{name}}}{values}\n'.format(name=name, values=separator.join([str(x) for x in values])))
_df.to_csv(f, header=None, index=False, sep=separator)
class PathOrFile():
"""Small context manager for file paths or file-like objects
:param path_or_file: Path to a file or file-like object
:param mode: Set reading/writing mode
:param reset: Reset file-like to initial position. Has no effect on path."""
def __init__(self, path_or_file, mode = None, reset=False):
self.path_or_file = path_or_file
self.mode = mode
self.isPath = isinstance(path_or_file, str)
self.reset = reset and not self.isPath
if self.reset:
self.position = self.path_or_file.seek(0, 1)
def __enter__(self):
if self.isPath:
self.open_file = open(self.path_or_file, self.mode)
return self.open_file
else:
self.open_file = None
return self.path_or_file
def __exit__(self, *args):
if self.open_file:
self.open_file.close()
if self.reset:
self.path_or_file.seek(self.position)
_numeric_dtypes = {np.dtype('float32'), np.dtype('float64'), np.dtype('int32'), np.dtype('int64')}
def _infer_main_columns(df, index_level='Column Name', numeric_dtypes=_numeric_dtypes):
"""
All numeric columns up-until the first non-numeric column are considered main columns.
:param df: The pd.DataFrame
:param index_level: Name of the index level of the column names. Default 'Column Name'
:param numeric_dtypes: Set of numpy.dtype containing all numeric types. Default int/float.
:returns: The names of the infered main columns
"""
columns = df.columns.get_level_values(index_level)
main_columns = []
for i,dtype in enumerate(df.dtypes):
if dtype in numeric_dtypes:
main_columns.append(columns[i])
else:
break
return main_columns
|
cox-labs/perseuspy | perseuspy/io/perseus/matrix.py | read_perseus | python | def read_perseus(path_or_file, **kwargs):
annotations = read_annotations(path_or_file, separator)
column_index = create_column_index(annotations)
if 'usecols' in kwargs:
usecols = kwargs['usecols']
if type(usecols[0]) is str:
usecols = sorted([list(column_index).index(x) for x in usecols])
column_index = column_index[usecols]
kwargs['dtype'] = dict(kwargs.get('dtype', {}), **annotations.get('dtype', {}))
kwargs['converters'] = dict(kwargs.get('converters', {}), **annotations.get('converters', {}))
df = pd.read_csv(path_or_file, sep=separator, comment='#', **kwargs)
df.columns = column_index
return df | Read a Perseus-formatted matrix into a pd.DataFrame.
Annotation rows will be converted into a multi-index.
By monkey-patching the returned pd.DataFrame a `to_perseus`
method for exporting the pd.DataFrame is made available.
:param path_or_file: File path or file-like object
:param kwargs: Keyword arguments passed as-is to pandas.read_csv
:returns: The parsed data frame | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/perseus/matrix.py#L79-L102 | [
"def read_annotations(path_or_file, separator='\\t', reset=True):\n \"\"\"\n Read all annotations from the specified file.\n\n >>> annotations = read_annotations(path_or_file, separator)\n >>> colnames = annotations['Column Name']\n >>> types = annotations['Type']\n >>> annot_row = annotations['An... | import numpy as np
import pandas as pd
from collections import OrderedDict
separator = '\t'
def multi_numeric_converter(numbers):
return [float(num) for num in numbers.split(';') if num != '']
converters = {'M': multi_numeric_converter}
perseus_to_dtype = {'E' : float, 'T' : str, 'C' : 'category', 'N' : float}
def dtype_to_perseus(dtype):
if type(dtype) is pd.core.dtypes.dtypes.CategoricalDtype:
return 'C'
else:
mapping = {np.dtype('float'): 'N', np.dtype('str'): 'T',
np.dtype('object'): 'T', np.dtype('int64'): 'N',
np.dtype('bool'): 'C'}
return mapping[dtype]
def read_annotations(path_or_file, separator='\t', reset=True):
"""
Read all annotations from the specified file.
>>> annotations = read_annotations(path_or_file, separator)
>>> colnames = annotations['Column Name']
>>> types = annotations['Type']
>>> annot_row = annotations['Annot. row name']
:param path_or_file: Path or file-like object
:param separator: Column separator
:param reset: Reset the file after reading. Useful for file-like, no-op for paths.
:returns: Ordered dictionary of annotations.
"""
annotations = OrderedDict({})
with PathOrFile(path_or_file, 'r', reset=reset) as f:
annotations['Column Name'] = f.readline().strip().split(separator)
for line in f:
if line.startswith('#!{'):
tokens = line.strip().split(separator)
_name, first_value = tokens[0].split('}')
name = _name.replace('#!{', '')
values = [first_value] + tokens[1:]
if name == 'Type':
colnames = annotations['Column Name']
annotations['dtype'] = {colnames[i]: perseus_to_dtype[x] for i, x in enumerate(values) if x in perseus_to_dtype}
annotations['converters'] = {colnames[i]: converters[x] for i, x in enumerate(values) if x in converters}
annotations[name] = values
return annotations
def annotation_rows(prefix, annotations):
"""
Helper function to extract N: and C: rows from annotations and pad their values
"""
ncol = len(annotations['Column Name'])
return {name.replace(prefix, '', 1) : values + [''] * (ncol - len(values))
for name, values in annotations.items() if name.startswith(prefix)}
def create_column_index(annotations):
"""
Create a pd.MultiIndex using the column names and any categorical rows.
Note that also non-main columns will be assigned a default category ''.
"""
_column_index = OrderedDict({'Column Name' : annotations['Column Name']})
categorical_rows = annotation_rows('C:', annotations)
_column_index.update(categorical_rows)
numerical_rows = {name: [float(x) if x != '' else float('NaN') for x in values]
for name, values in annotation_rows('N:', annotations).items()} # to floats
_column_index.update(numerical_rows)
column_index = pd.MultiIndex.from_tuples(list(zip(*_column_index.values())), names=list(_column_index.keys()))
if len(column_index.names) == 1:
# flatten single-level index
name = column_index.names[0]
column_index = column_index.get_level_values(name)
return column_index
import numpy as np
def to_perseus(df, path_or_file, main_columns=None,
        separator=separator,
        convert_bool_to_category=True,
        numerical_annotation_rows = set([])):
    """
    Save pd.DataFrame to Perseus text format.

    :param df: pd.DataFrame.
    :param path_or_file: File name or file-like object.
    :param main_columns: Main columns. Will be infered if set to None. All numeric columns up-until the first non-numeric column are considered main columns.
    :param separator: For separating fields, default='\t'.
    :param convert_bool_to_category: Convert bool columns of True/False to category columns '+'/'', default=True.
    :param numerical_annotation_rows: Set of column names to be interpreted as numerical annotation rows, default=set([]).
    """
    _df = df.copy()
    if not _df.columns.name:
        _df.columns.name = 'Column Name'
    column_names = _df.columns.get_level_values('Column Name')
    annotations = {}
    # main columns are written as type 'E'; all others are mapped from their dtype
    main_columns = _infer_main_columns(_df) if main_columns is None else main_columns
    annotations['Type'] = ['E' if column_names[i] in main_columns else dtype_to_perseus(dtype)
            for i, dtype in enumerate(_df.dtypes)]
    # detect multi-numeric columns: every non-None value is a list -> join with ';'
    for i, column in enumerate(_df.columns):
        valid_values = [value for value in _df[column] if value is not None]
        if len(valid_values) > 0 and all(type(value) is list for value in valid_values):
            annotations['Type'][i] = 'M'
            _df[column] = _df[column].apply(lambda xs: ';'.join(str(x) for x in xs))
    if convert_bool_to_category:
        for i, column in enumerate(_df.columns):
            # .iloc for positional lookup (plain integer indexing on a labelled
            # Series is deprecated); == rather than 'is' since dtype identity
            # is an implementation detail
            if _df.dtypes.iloc[i] == np.dtype('bool'):
                # map() replaces the whole column at once; the previous chained
                # indexing (_df[column][mask] = ...) assigned through a potential
                # copy and mixed str values into a bool column
                _df[column] = _df[column].map({True: '+', False: ''})
    annotation_row_names = set(_df.columns.names) - {'Column Name'}
    for name in annotation_row_names:
        annotation_type = 'N' if name in numerical_annotation_rows else 'C'
        annotations['{}:{}'.format(annotation_type, name)] = _df.columns.get_level_values(name)
    with PathOrFile(path_or_file, 'w') as f:
        f.write(separator.join(column_names) + '\n')
        for name, values in annotations.items():
            f.write('#!{{{name}}}{values}\n'.format(name=name, values=separator.join([str(x) for x in values])))
        _df.to_csv(f, header=None, index=False, sep=separator)
class PathOrFile():
    """Small context manager accepting either a filesystem path or an
    already-open file-like object.

    :param path_or_file: Path to a file or file-like object
    :param mode: Set reading/writing mode
    :param reset: Reset file-like to initial position. Has no effect on path."""
    def __init__(self, path_or_file, mode = None, reset=False):
        self.path_or_file = path_or_file
        self.mode = mode
        self.isPath = isinstance(path_or_file, str)
        # restoring the position only makes sense for file-like objects
        self.reset = reset and not self.isPath
        if self.reset:
            # seek(0, 1) moves nowhere and reports the current offset
            self.position = self.path_or_file.seek(0, 1)

    def __enter__(self):
        if not self.isPath:
            self.open_file = None
            return self.path_or_file
        self.open_file = open(self.path_or_file, self.mode)
        return self.open_file

    def __exit__(self, *args):
        # only close a handle we opened ourselves
        if self.open_file:
            self.open_file.close()
        if self.reset:
            self.path_or_file.seek(self.position)
_numeric_dtypes = {np.dtype('float32'), np.dtype('float64'), np.dtype('int32'), np.dtype('int64')}
def _infer_main_columns(df, index_level='Column Name', numeric_dtypes=_numeric_dtypes):
"""
All numeric columns up-until the first non-numeric column are considered main columns.
:param df: The pd.DataFrame
:param index_level: Name of the index level of the column names. Default 'Column Name'
:param numeric_dtypes: Set of numpy.dtype containing all numeric types. Default int/float.
:returns: The names of the infered main columns
"""
columns = df.columns.get_level_values(index_level)
main_columns = []
for i,dtype in enumerate(df.dtypes):
if dtype in numeric_dtypes:
main_columns.append(columns[i])
else:
break
return main_columns
|
cox-labs/perseuspy | perseuspy/io/perseus/matrix.py | to_perseus | python | def to_perseus(df, path_or_file, main_columns=None,
separator=separator,
convert_bool_to_category=True,
numerical_annotation_rows = set([])):
_df = df.copy()
if not _df.columns.name:
_df.columns.name = 'Column Name'
column_names = _df.columns.get_level_values('Column Name')
annotations = {}
main_columns = _infer_main_columns(_df) if main_columns is None else main_columns
annotations['Type'] = ['E' if column_names[i] in main_columns else dtype_to_perseus(dtype)
for i, dtype in enumerate(_df.dtypes)]
# detect multi-numeric columns
for i, column in enumerate(_df.columns):
valid_values = [value for value in _df[column] if value is not None]
if len(valid_values) > 0 and all(type(value) is list for value in valid_values):
annotations['Type'][i] = 'M'
_df[column] = _df[column].apply(lambda xs: ';'.join(str(x) for x in xs))
if convert_bool_to_category:
for i, column in enumerate(_df.columns):
if _df.dtypes[i] is np.dtype('bool'):
values = _df[column].values
_df[column][values] = '+'
_df[column][~values] = ''
annotation_row_names = set(_df.columns.names) - {'Column Name'}
for name in annotation_row_names:
annotation_type = 'N' if name in numerical_annotation_rows else 'C'
annotations['{}:{}'.format(annotation_type, name)] = _df.columns.get_level_values(name)
with PathOrFile(path_or_file, 'w') as f:
f.write(separator.join(column_names) + '\n')
for name, values in annotations.items():
f.write('#!{{{name}}}{values}\n'.format(name=name, values=separator.join([str(x) for x in values])))
_df.to_csv(f, header=None, index=False, sep=separator) | Save pd.DataFrame to Perseus text format.
:param df: pd.DataFrame.
:param path_or_file: File name or file-like object.
:param main_columns: Main columns. Will be infered if set to None. All numeric columns up-until the first non-numeric column are considered main columns.
:param separator: For separating fields, default='\t'.
:param covert_bool_to_category: Convert bool columns of True/False to category columns '+'/'', default=True.
:param numerical_annotation_rows: Set of column names to be interpreted as numerical annotation rows, default=set([]). | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/perseus/matrix.py#L105-L147 | [
"def _infer_main_columns(df, index_level='Column Name', numeric_dtypes=_numeric_dtypes):\n \"\"\"\n All numeric columns up-until the first non-numeric column are considered main columns.\n :param df: The pd.DataFrame\n :param index_level: Name of the index level of the column names. Default 'Column Name... | import numpy as np
import pandas as pd
from collections import OrderedDict
separator = '\t'
def multi_numeric_converter(numbers):
    """Parse a ';'-separated string of numbers into a list of floats, skipping empty fields."""
    parts = numbers.split(';')
    return [float(part) for part in parts if part]
converters = {'M': multi_numeric_converter}
perseus_to_dtype = {'E' : float, 'T' : str, 'C' : 'category', 'N' : float}
def dtype_to_perseus(dtype):
    """Map a pandas/numpy column dtype to its Perseus type letter.

    'N' = numerical, 'T' = text, 'C' = categorical.

    :param dtype: pandas or numpy dtype of a column.
    :returns: One-letter Perseus type code.
    :raises KeyError: If the dtype has no Perseus equivalent.
    """
    # public isinstance check instead of comparing against the private
    # pd.core.dtypes.dtypes.CategoricalDtype class object
    if isinstance(dtype, pd.CategoricalDtype):
        return 'C'
    # 32-bit variants included for consistency with _numeric_dtypes, which
    # already treats float32/int32 columns as numeric
    mapping = {np.dtype('float32'): 'N', np.dtype('float64'): 'N',
               np.dtype('str'): 'T', np.dtype('object'): 'T',
               np.dtype('int32'): 'N', np.dtype('int64'): 'N',
               np.dtype('bool'): 'C'}
    return mapping[dtype]
def read_annotations(path_or_file, separator='\t', reset=True):
    """
    Read the column names and all '#!{...}' annotation rows from a
    Perseus-formatted file.

    >>> annotations = read_annotations(path_or_file, separator)
    >>> colnames = annotations['Column Name']
    >>> types = annotations['Type']
    >>> annot_row = annotations['Annot. row name']

    :param path_or_file: Path or file-like object
    :param separator: Column separator
    :param reset: Reset the file after reading. Useful for file-like, no-op for paths.
    :returns: Ordered dictionary of annotations.
    """
    result = OrderedDict()
    with PathOrFile(path_or_file, 'r', reset=reset) as handle:
        # first line holds the column names
        result['Column Name'] = handle.readline().strip().split(separator)
        for raw_line in handle:
            if not raw_line.startswith('#!{'):
                continue
            fields = raw_line.strip().split(separator)
            # first field looks like '#!{Name}value'
            prefix, first_value = fields[0].split('}')
            row_name = prefix.replace('#!{', '')
            row_values = [first_value] + fields[1:]
            if row_name == 'Type':
                # derive per-column dtypes and converters from the type row
                names = result['Column Name']
                result['dtype'] = {names[i]: perseus_to_dtype[t]
                                   for i, t in enumerate(row_values) if t in perseus_to_dtype}
                result['converters'] = {names[i]: converters[t]
                                        for i, t in enumerate(row_values) if t in converters}
            result[row_name] = row_values
    return result
def annotation_rows(prefix, annotations):
"""
Helper function to extract N: and C: rows from annotations and pad their values
"""
ncol = len(annotations['Column Name'])
return {name.replace(prefix, '', 1) : values + [''] * (ncol - len(values))
for name, values in annotations.items() if name.startswith(prefix)}
def create_column_index(annotations):
"""
Create a pd.MultiIndex using the column names and any categorical rows.
Note that also non-main columns will be assigned a default category ''.
"""
_column_index = OrderedDict({'Column Name' : annotations['Column Name']})
categorical_rows = annotation_rows('C:', annotations)
_column_index.update(categorical_rows)
numerical_rows = {name: [float(x) if x != '' else float('NaN') for x in values]
for name, values in annotation_rows('N:', annotations).items()} # to floats
_column_index.update(numerical_rows)
column_index = pd.MultiIndex.from_tuples(list(zip(*_column_index.values())), names=list(_column_index.keys()))
if len(column_index.names) == 1:
# flatten single-level index
name = column_index.names[0]
column_index = column_index.get_level_values(name)
return column_index
def read_perseus(path_or_file, **kwargs):
    """
    Read a Perseus-formatted matrix into a pd.DataFrame.
    Annotation rows will be converted into a multi-index.

    By monkey-patching the returned pd.DataFrame a `to_perseus`
    method for exporting the pd.DataFrame is made available.

    :param path_or_file: File path or file-like object
    :param kwargs: Keyword arguments passed as-is to pandas.read_csv
    :returns: The parsed data frame
    """
    # First pass: header and '#!{...}' annotation rows. read_annotations
    # resets file-like objects by default, so the second pass can re-read.
    annotations = read_annotations(path_or_file, separator)
    column_index = create_column_index(annotations)
    if 'usecols' in kwargs:
        usecols = kwargs['usecols']
        # NOTE(review): assumes a non-empty usecols sequence
        if type(usecols[0]) is str:
            # translate names to positional indices; sorted so the subset
            # lines up with the column order read_csv produces
            usecols = sorted([list(column_index).index(x) for x in usecols])
        column_index = column_index[usecols]
    # merge caller-supplied dtype/converters with those derived from the
    # 'Type' annotation row; annotation-derived entries win on conflict
    kwargs['dtype'] = dict(kwargs.get('dtype', {}), **annotations.get('dtype', {}))
    kwargs['converters'] = dict(kwargs.get('converters', {}), **annotations.get('converters', {}))
    # Second pass: parse the data; comment='#' drops the annotation rows
    df = pd.read_csv(path_or_file, sep=separator, comment='#', **kwargs)
    df.columns = column_index
    return df
import numpy as np
class PathOrFile():
"""Small context manager for file paths or file-like objects
:param path_or_file: Path to a file or file-like object
:param mode: Set reading/writing mode
:param reset: Reset file-like to initial position. Has no effect on path."""
def __init__(self, path_or_file, mode = None, reset=False):
self.path_or_file = path_or_file
self.mode = mode
self.isPath = isinstance(path_or_file, str)
self.reset = reset and not self.isPath
if self.reset:
self.position = self.path_or_file.seek(0, 1)
def __enter__(self):
if self.isPath:
self.open_file = open(self.path_or_file, self.mode)
return self.open_file
else:
self.open_file = None
return self.path_or_file
def __exit__(self, *args):
if self.open_file:
self.open_file.close()
if self.reset:
self.path_or_file.seek(self.position)
_numeric_dtypes = {np.dtype('float32'), np.dtype('float64'), np.dtype('int32'), np.dtype('int64')}
def _infer_main_columns(df, index_level='Column Name', numeric_dtypes=_numeric_dtypes):
"""
All numeric columns up-until the first non-numeric column are considered main columns.
:param df: The pd.DataFrame
:param index_level: Name of the index level of the column names. Default 'Column Name'
:param numeric_dtypes: Set of numpy.dtype containing all numeric types. Default int/float.
:returns: The names of the infered main columns
"""
columns = df.columns.get_level_values(index_level)
main_columns = []
for i,dtype in enumerate(df.dtypes):
if dtype in numeric_dtypes:
main_columns.append(columns[i])
else:
break
return main_columns
|
cox-labs/perseuspy | perseuspy/io/perseus/matrix.py | _infer_main_columns | python | def _infer_main_columns(df, index_level='Column Name', numeric_dtypes=_numeric_dtypes):
columns = df.columns.get_level_values(index_level)
main_columns = []
for i,dtype in enumerate(df.dtypes):
if dtype in numeric_dtypes:
main_columns.append(columns[i])
else:
break
return main_columns | All numeric columns up-until the first non-numeric column are considered main columns.
:param df: The pd.DataFrame
:param index_level: Name of the index level of the column names. Default 'Column Name'
:param numeric_dtypes: Set of numpy.dtype containing all numeric types. Default int/float.
:returns: The names of the infered main columns | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/perseus/matrix.py#L177-L192 | null | import numpy as np
import pandas as pd
from collections import OrderedDict
separator = '\t'
def multi_numeric_converter(numbers):
return [float(num) for num in numbers.split(';') if num != '']
converters = {'M': multi_numeric_converter}
perseus_to_dtype = {'E' : float, 'T' : str, 'C' : 'category', 'N' : float}
def dtype_to_perseus(dtype):
if type(dtype) is pd.core.dtypes.dtypes.CategoricalDtype:
return 'C'
else:
mapping = {np.dtype('float'): 'N', np.dtype('str'): 'T',
np.dtype('object'): 'T', np.dtype('int64'): 'N',
np.dtype('bool'): 'C'}
return mapping[dtype]
def read_annotations(path_or_file, separator='\t', reset=True):
"""
Read all annotations from the specified file.
>>> annotations = read_annotations(path_or_file, separator)
>>> colnames = annotations['Column Name']
>>> types = annotations['Type']
>>> annot_row = annotations['Annot. row name']
:param path_or_file: Path or file-like object
:param separator: Column separator
:param reset: Reset the file after reading. Useful for file-like, no-op for paths.
:returns: Ordered dictionary of annotations.
"""
annotations = OrderedDict({})
with PathOrFile(path_or_file, 'r', reset=reset) as f:
annotations['Column Name'] = f.readline().strip().split(separator)
for line in f:
if line.startswith('#!{'):
tokens = line.strip().split(separator)
_name, first_value = tokens[0].split('}')
name = _name.replace('#!{', '')
values = [first_value] + tokens[1:]
if name == 'Type':
colnames = annotations['Column Name']
annotations['dtype'] = {colnames[i]: perseus_to_dtype[x] for i, x in enumerate(values) if x in perseus_to_dtype}
annotations['converters'] = {colnames[i]: converters[x] for i, x in enumerate(values) if x in converters}
annotations[name] = values
return annotations
def annotation_rows(prefix, annotations):
"""
Helper function to extract N: and C: rows from annotations and pad their values
"""
ncol = len(annotations['Column Name'])
return {name.replace(prefix, '', 1) : values + [''] * (ncol - len(values))
for name, values in annotations.items() if name.startswith(prefix)}
def create_column_index(annotations):
"""
Create a pd.MultiIndex using the column names and any categorical rows.
Note that also non-main columns will be assigned a default category ''.
"""
_column_index = OrderedDict({'Column Name' : annotations['Column Name']})
categorical_rows = annotation_rows('C:', annotations)
_column_index.update(categorical_rows)
numerical_rows = {name: [float(x) if x != '' else float('NaN') for x in values]
for name, values in annotation_rows('N:', annotations).items()} # to floats
_column_index.update(numerical_rows)
column_index = pd.MultiIndex.from_tuples(list(zip(*_column_index.values())), names=list(_column_index.keys()))
if len(column_index.names) == 1:
# flatten single-level index
name = column_index.names[0]
column_index = column_index.get_level_values(name)
return column_index
def read_perseus(path_or_file, **kwargs):
"""
Read a Perseus-formatted matrix into a pd.DataFrame.
Annotation rows will be converted into a multi-index.
By monkey-patching the returned pd.DataFrame a `to_perseus`
method for exporting the pd.DataFrame is made available.
:param path_or_file: File path or file-like object
:param kwargs: Keyword arguments passed as-is to pandas.read_csv
:returns: The parsed data frame
"""
annotations = read_annotations(path_or_file, separator)
column_index = create_column_index(annotations)
if 'usecols' in kwargs:
usecols = kwargs['usecols']
if type(usecols[0]) is str:
usecols = sorted([list(column_index).index(x) for x in usecols])
column_index = column_index[usecols]
kwargs['dtype'] = dict(kwargs.get('dtype', {}), **annotations.get('dtype', {}))
kwargs['converters'] = dict(kwargs.get('converters', {}), **annotations.get('converters', {}))
df = pd.read_csv(path_or_file, sep=separator, comment='#', **kwargs)
df.columns = column_index
return df
import numpy as np
def to_perseus(df, path_or_file, main_columns=None,
separator=separator,
convert_bool_to_category=True,
numerical_annotation_rows = set([])):
"""
Save pd.DataFrame to Perseus text format.
:param df: pd.DataFrame.
:param path_or_file: File name or file-like object.
:param main_columns: Main columns. Will be infered if set to None. All numeric columns up-until the first non-numeric column are considered main columns.
:param separator: For separating fields, default='\t'.
:param covert_bool_to_category: Convert bool columns of True/False to category columns '+'/'', default=True.
:param numerical_annotation_rows: Set of column names to be interpreted as numerical annotation rows, default=set([]).
"""
_df = df.copy()
if not _df.columns.name:
_df.columns.name = 'Column Name'
column_names = _df.columns.get_level_values('Column Name')
annotations = {}
main_columns = _infer_main_columns(_df) if main_columns is None else main_columns
annotations['Type'] = ['E' if column_names[i] in main_columns else dtype_to_perseus(dtype)
for i, dtype in enumerate(_df.dtypes)]
# detect multi-numeric columns
for i, column in enumerate(_df.columns):
valid_values = [value for value in _df[column] if value is not None]
if len(valid_values) > 0 and all(type(value) is list for value in valid_values):
annotations['Type'][i] = 'M'
_df[column] = _df[column].apply(lambda xs: ';'.join(str(x) for x in xs))
if convert_bool_to_category:
for i, column in enumerate(_df.columns):
if _df.dtypes[i] is np.dtype('bool'):
values = _df[column].values
_df[column][values] = '+'
_df[column][~values] = ''
annotation_row_names = set(_df.columns.names) - {'Column Name'}
for name in annotation_row_names:
annotation_type = 'N' if name in numerical_annotation_rows else 'C'
annotations['{}:{}'.format(annotation_type, name)] = _df.columns.get_level_values(name)
with PathOrFile(path_or_file, 'w') as f:
f.write(separator.join(column_names) + '\n')
for name, values in annotations.items():
f.write('#!{{{name}}}{values}\n'.format(name=name, values=separator.join([str(x) for x in values])))
_df.to_csv(f, header=None, index=False, sep=separator)
class PathOrFile():
"""Small context manager for file paths or file-like objects
:param path_or_file: Path to a file or file-like object
:param mode: Set reading/writing mode
:param reset: Reset file-like to initial position. Has no effect on path."""
def __init__(self, path_or_file, mode = None, reset=False):
self.path_or_file = path_or_file
self.mode = mode
self.isPath = isinstance(path_or_file, str)
self.reset = reset and not self.isPath
if self.reset:
self.position = self.path_or_file.seek(0, 1)
def __enter__(self):
if self.isPath:
self.open_file = open(self.path_or_file, self.mode)
return self.open_file
else:
self.open_file = None
return self.path_or_file
def __exit__(self, *args):
if self.open_file:
self.open_file.close()
if self.reset:
self.path_or_file.seek(self.position)
_numeric_dtypes = {np.dtype('float32'), np.dtype('float64'), np.dtype('int32'), np.dtype('int64')}
|
cox-labs/perseuspy | perseuspy/io/perseus/network.py | read_networks | python | def read_networks(folder):
network_table = read_perseus(path.join(folder, "networks.txt"))
networks = {}
for name, guid in network_table[['Name', 'GUID']].values:
networks[guid] = {
'name': name,
'guid': guid,
'node_table': read_perseus(path.join(folder, "{}_nodes.txt".format(guid))),
'edge_table': read_perseus(path.join(folder, "{}_edges.txt".format(guid)))
}
return network_table, networks | Read perseus network collection folder format
>>> network_table, networks = read_networks(folder)
:param folder: Path to network collection
:returns: Network table and dictionary with 'name', 'edge_table', and 'node_table' keys. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/perseus/network.py#L9-L27 | [
"def read_perseus(path_or_file, **kwargs):\n \"\"\"\n Read a Perseus-formatted matrix into a pd.DataFrame.\n Annotation rows will be converted into a multi-index.\n\n By monkey-patching the returned pd.DataFrame a `to_perseus`\n method for exporting the pd.DataFrame is made available.\n\n :param p... | from os import path, makedirs
import uuid
import networkx as nx
from collections import OrderedDict
from perseuspy.io.perseus.matrix import read_perseus
import pandas as pd
import warnings
def read_networks(folder):
    """
    Read a Perseus network collection folder.

    >>> network_table, networks = read_networks(folder)

    :param folder: Path to network collection
    :returns: Network table and dictionary with 'name', 'edge_table', and 'node_table' keys.
    """
    table = read_perseus(path.join(folder, "networks.txt"))
    collection = {}
    for net_name, net_guid in table[['Name', 'GUID']].values:
        # per-network tables are named by the network's guid
        node_file = path.join(folder, "{}_nodes.txt".format(net_guid))
        edge_file = path.join(folder, "{}_edges.txt".format(net_guid))
        collection[net_guid] = {
            'name': net_name,
            'guid': net_guid,
            'node_table': read_perseus(node_file),
            'edge_table': read_perseus(edge_file),
        }
    return table, collection
def from_perseus(network_table, networks):
    """
    Create networkx graphs from network tables.

    >>> from perseuspy import read_networks, nx
    >>> network_table, networks = read_networks(folder)
    >>> graphs = nx.from_perseus(network_table, networks)

    :param network_table: Network table with one row per network.
    :param networks: Dictionary of networks ('name', 'node_table', 'edge_table'), indexed by GUID.
    :returns: List of nx.DiGraph, one per row of the network table.
    """
    graphs = []
    for guid, graph_attr in zip(network_table['GUID'], network_table.values):
        network = networks[guid]
        edge_table = network['edge_table']
        if edge_table[['Source', 'Target']].duplicated().any():
            warnings.warn('Duplicate edges were found and ignored in network {}'.format(network['name']))
        G = nx.from_pandas_edgelist(edge_table, 'Source', 'Target', True, create_using=nx.DiGraph())
        # attach the network-table row as graph-level attributes
        for attr, value in zip(network_table.columns, graph_attr):
            G.graph[attr] = value
        node_table = network['node_table']
        if node_table['Node'].duplicated().any():
            warnings.warn('Duplicate nodes were found and ignored in network {}'.format(network['name']))
        node_column = node_table['Node']
        for name, attributes in zip(node_column, node_table.values):
            if name not in G:
                # keep isolated nodes that appear in no edge
                G.add_node(name)
            for attr, value in zip(node_table.columns, attributes):
                # G.nodes, not G.node: the G.node alias was removed in
                # networkx 2.4; G.nodes works on all networkx >= 2.0
                G.nodes[name][attr] = value
        graphs.append(G)
    return graphs
def to_perseus(graphs):
    """
    Create a network table and the network dictionary for export to Perseus.

    :param graphs: Collection of networkx graphs

    >>> from perseuspy import nx
    >>> G = nx.random_graphs.barabasi_albert_graph(10, 3)
    >>> network_table, networks = nx.to_perseus([G])
    """
    rows = []
    networks = {}
    for graph in graphs:
        meta = dict(graph.graph)
        # accept either 'Name' or lowercase 'name'; generate a GUID if absent
        display_name = meta.get("Name", meta.get("name", "networkx graph"))
        guid = meta.get("GUID", str(uuid.uuid4()))
        meta.update({"Name": display_name, "GUID": guid})
        rows.append(meta)
        if len(graph) > 0:
            edges = [dict(data, **{"Source": str(src), "Target": str(dst)})
                     for src, dst, data in graph.edges(data=True)]
            edge_table = pd.DataFrame(edges)
            edge_table.columns.name = "Column Name"
            nodes = [dict(data, **{"Node": str(node)}) for node, data in graph.nodes(data=True)]
            node_table = pd.DataFrame(nodes)
            node_table.columns.name = "Column Name"
        else:
            # empty graph: emit tables with just the mandatory columns
            edge_table = pd.DataFrame(columns=pd.Index(['Source', 'Target'], name='Column Name'))
            node_table = pd.DataFrame(columns=pd.Index(['Node'], name='Column Name'))
        networks[guid] = {
            'edge_table': edge_table,
            'node_table': node_table,
            'name': meta['Name'],
            'guid': guid }
    network_table = pd.DataFrame(rows)
    network_table.columns.name = "Column Name"
    return network_table, networks
def write_networks(folder, network_table, networks):
    """
    Write the network table plus per-network node and edge tables in
    Perseus-readable format.

    :param folder: Path to output directory.
    :param network_table: Network table.
    :param networks: Dictionary with node and edge tables, indexed by network guid.
    """
    makedirs(folder, exist_ok=True)
    # the collection-level table first
    network_table.to_perseus(path.join(folder, 'networks.txt'), main_columns=[])
    # then one node table and one edge table per network, named by guid
    for guid, net in networks.items():
        node_target = path.join(folder, '{}_nodes.txt'.format(guid))
        edge_target = path.join(folder, '{}_edges.txt'.format(guid))
        net['node_table'].to_perseus(node_target, main_columns=[])
        net['edge_table'].to_perseus(edge_target, main_columns=[])
|
cox-labs/perseuspy | perseuspy/io/perseus/network.py | from_perseus | python | def from_perseus(network_table, networks):
graphs = []
for guid, graph_attr in zip(network_table['GUID'], network_table.values):
network = networks[guid]
edge_table = network['edge_table']
if edge_table[['Source', 'Target']].duplicated().any():
warnings.warn('Duplicate edges were found and ignored in network {}'.format(network['name']))
G = nx.from_pandas_edgelist(edge_table, 'Source', 'Target', True, create_using=nx.DiGraph())
for attr, value in zip(network_table.columns, graph_attr):
G.graph[attr] = value
node_table = network['node_table']
if node_table['Node'].duplicated().any():
warnings.warn('Duplicate nodes were found and ignored in network {}'.format(network['name']))
node_column = node_table['Node']
for name, attributes in zip(node_column, node_table.values):
if name not in G:
G.add_node(name)
for attr, value in zip(node_table.columns, attributes):
G.node[name][attr] = value
graphs.append(G)
return graphs | Create networkx graph from network tables
>>> from perseuspy import read_networks, nx
>>> network_table, networks = read_networks(folder)
>>> graphs = nx.from_perseus(network_table, networks) | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/perseus/network.py#L29-L56 | null | from os import path, makedirs
import uuid
import networkx as nx
from collections import OrderedDict
from perseuspy.io.perseus.matrix import read_perseus
import pandas as pd
import warnings
def read_networks(folder):
"""
Read perseus network collection folder format
>>> network_table, networks = read_networks(folder)
:param folder: Path to network collection
:returns: Network table and dictionary with 'name', 'edge_table', and 'node_table' keys.
"""
network_table = read_perseus(path.join(folder, "networks.txt"))
networks = {}
for name, guid in network_table[['Name', 'GUID']].values:
networks[guid] = {
'name': name,
'guid': guid,
'node_table': read_perseus(path.join(folder, "{}_nodes.txt".format(guid))),
'edge_table': read_perseus(path.join(folder, "{}_edges.txt".format(guid)))
}
return network_table, networks
def from_perseus(network_table, networks):
"""
Create networkx graph from network tables
>>> from perseuspy import read_networks, nx
>>> network_table, networks = read_networks(folder)
>>> graphs = nx.from_perseus(network_table, networks)
"""
graphs = []
for guid, graph_attr in zip(network_table['GUID'], network_table.values):
network = networks[guid]
edge_table = network['edge_table']
if edge_table[['Source', 'Target']].duplicated().any():
warnings.warn('Duplicate edges were found and ignored in network {}'.format(network['name']))
G = nx.from_pandas_edgelist(edge_table, 'Source', 'Target', True, create_using=nx.DiGraph())
for attr, value in zip(network_table.columns, graph_attr):
G.graph[attr] = value
node_table = network['node_table']
if node_table['Node'].duplicated().any():
warnings.warn('Duplicate nodes were found and ignored in network {}'.format(network['name']))
node_column = node_table['Node']
for name, attributes in zip(node_column, node_table.values):
if name not in G:
G.add_node(name)
for attr, value in zip(node_table.columns, attributes):
G.node[name][attr] = value
graphs.append(G)
return graphs
def to_perseus(graphs):
"""
Create a network table and the network dictionary for export to Perseus.
:param graphs: Collection of networkx graphs
>>> from perseuspy import nx
>>> G = nx.random_graphs.barabasi_albert_graph(10, 3)
>>> network_table, networks = nx.to_perseus([G])
"""
graph_attributes = []
networks = {}
for graph in graphs:
attributes = dict(graph.graph)
attributes.update({"Name" : attributes.get("Name", attributes.get("name", "networkx graph")),
"GUID": attributes.get("GUID", str(uuid.uuid4()))})
graph_attributes.append(attributes)
if len(graph) > 0:
edge_table = pd.DataFrame([dict(data, **{"Source": str(f), "Target": str(t)}) for f,t,data in graph.edges(data=True)])
edge_table.columns.name = "Column Name"
node_table = pd.DataFrame([dict(data, **{"Node": str(n)}) for n,data in graph.nodes(data=True)])
node_table.columns.name = "Column Name"
else:
edge_table = pd.DataFrame(columns=pd.Index(['Source', 'Target'], name='Column Name'))
node_table = pd.DataFrame(columns=pd.Index(['Node'], name='Column Name'))
guid = attributes['GUID']
networks[guid] = {
'edge_table': edge_table,
'node_table': node_table,
'name': attributes['Name'],
'guid': guid }
network_table = pd.DataFrame(graph_attributes)
network_table.columns.name = "Column Name"
return network_table, networks
def write_networks(folder, network_table, networks):
"""
Writing networkTable, nodes and edges to Perseus readable format.
:param folder: Path to output directory.
:param network_table: Network table.
:param networks: Dictionary with node and edge tables, indexed by network guid.
"""
makedirs(folder, exist_ok=True)
network_table.to_perseus(path.join(folder, 'networks.txt'), main_columns=[])
for guid, network in networks.items():
network['node_table'].to_perseus(path.join(folder, '{}_nodes.txt'.format(guid)), main_columns=[])
network['edge_table'].to_perseus(path.join(folder, '{}_edges.txt'.format(guid)), main_columns=[])
|
cox-labs/perseuspy | perseuspy/io/perseus/network.py | to_perseus | python | def to_perseus(graphs):
graph_attributes = []
networks = {}
for graph in graphs:
attributes = dict(graph.graph)
attributes.update({"Name" : attributes.get("Name", attributes.get("name", "networkx graph")),
"GUID": attributes.get("GUID", str(uuid.uuid4()))})
graph_attributes.append(attributes)
if len(graph) > 0:
edge_table = pd.DataFrame([dict(data, **{"Source": str(f), "Target": str(t)}) for f,t,data in graph.edges(data=True)])
edge_table.columns.name = "Column Name"
node_table = pd.DataFrame([dict(data, **{"Node": str(n)}) for n,data in graph.nodes(data=True)])
node_table.columns.name = "Column Name"
else:
edge_table = pd.DataFrame(columns=pd.Index(['Source', 'Target'], name='Column Name'))
node_table = pd.DataFrame(columns=pd.Index(['Node'], name='Column Name'))
guid = attributes['GUID']
networks[guid] = {
'edge_table': edge_table,
'node_table': node_table,
'name': attributes['Name'],
'guid': guid }
network_table = pd.DataFrame(graph_attributes)
network_table.columns.name = "Column Name"
return network_table, networks | Create a network table and the network dictionary for export to Perseus.
:param graphs: Collection of networkx graphs
>>> from perseuspy import nx
>>> G = nx.random_graphs.barabasi_albert_graph(10, 3)
>>> network_table, networks = nx.to_perseus([G]) | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/perseus/network.py#L58-L91 | null | from os import path, makedirs
import uuid
import networkx as nx
from collections import OrderedDict
from perseuspy.io.perseus.matrix import read_perseus
import pandas as pd
import warnings
def read_networks(folder):
"""
Read perseus network collection folder format
>>> network_table, networks = read_networks(folder)
:param folder: Path to network collection
:returns: Network table and dictionary with 'name', 'edge_table', and 'node_table' keys.
"""
network_table = read_perseus(path.join(folder, "networks.txt"))
networks = {}
for name, guid in network_table[['Name', 'GUID']].values:
networks[guid] = {
'name': name,
'guid': guid,
'node_table': read_perseus(path.join(folder, "{}_nodes.txt".format(guid))),
'edge_table': read_perseus(path.join(folder, "{}_edges.txt".format(guid)))
}
return network_table, networks
def from_perseus(network_table, networks):
"""
Create networkx graph from network tables
>>> from perseuspy import read_networks, nx
>>> network_table, networks = read_networks(folder)
>>> graphs = nx.from_perseus(network_table, networks)
"""
graphs = []
for guid, graph_attr in zip(network_table['GUID'], network_table.values):
network = networks[guid]
edge_table = network['edge_table']
if edge_table[['Source', 'Target']].duplicated().any():
warnings.warn('Duplicate edges were found and ignored in network {}'.format(network['name']))
G = nx.from_pandas_edgelist(edge_table, 'Source', 'Target', True, create_using=nx.DiGraph())
for attr, value in zip(network_table.columns, graph_attr):
G.graph[attr] = value
node_table = network['node_table']
if node_table['Node'].duplicated().any():
warnings.warn('Duplicate nodes were found and ignored in network {}'.format(network['name']))
node_column = node_table['Node']
for name, attributes in zip(node_column, node_table.values):
if name not in G:
G.add_node(name)
for attr, value in zip(node_table.columns, attributes):
G.node[name][attr] = value
graphs.append(G)
return graphs
def to_perseus(graphs):
"""
Create a network table and the network dictionary for export to Perseus.
:param graphs: Collection of networkx graphs
>>> from perseuspy import nx
>>> G = nx.random_graphs.barabasi_albert_graph(10, 3)
>>> network_table, networks = nx.to_perseus([G])
"""
graph_attributes = []
networks = {}
for graph in graphs:
attributes = dict(graph.graph)
attributes.update({"Name" : attributes.get("Name", attributes.get("name", "networkx graph")),
"GUID": attributes.get("GUID", str(uuid.uuid4()))})
graph_attributes.append(attributes)
if len(graph) > 0:
edge_table = pd.DataFrame([dict(data, **{"Source": str(f), "Target": str(t)}) for f,t,data in graph.edges(data=True)])
edge_table.columns.name = "Column Name"
node_table = pd.DataFrame([dict(data, **{"Node": str(n)}) for n,data in graph.nodes(data=True)])
node_table.columns.name = "Column Name"
else:
edge_table = pd.DataFrame(columns=pd.Index(['Source', 'Target'], name='Column Name'))
node_table = pd.DataFrame(columns=pd.Index(['Node'], name='Column Name'))
guid = attributes['GUID']
networks[guid] = {
'edge_table': edge_table,
'node_table': node_table,
'name': attributes['Name'],
'guid': guid }
network_table = pd.DataFrame(graph_attributes)
network_table.columns.name = "Column Name"
return network_table, networks
def write_networks(folder, network_table, networks):
"""
Writing networkTable, nodes and edges to Perseus readable format.
:param folder: Path to output directory.
:param network_table: Network table.
:param networks: Dictionary with node and edge tables, indexed by network guid.
"""
makedirs(folder, exist_ok=True)
network_table.to_perseus(path.join(folder, 'networks.txt'), main_columns=[])
for guid, network in networks.items():
network['node_table'].to_perseus(path.join(folder, '{}_nodes.txt'.format(guid)), main_columns=[])
network['edge_table'].to_perseus(path.join(folder, '{}_edges.txt'.format(guid)), main_columns=[])
|
cox-labs/perseuspy | perseuspy/io/perseus/network.py | write_networks | python | def write_networks(folder, network_table, networks):
makedirs(folder, exist_ok=True)
network_table.to_perseus(path.join(folder, 'networks.txt'), main_columns=[])
for guid, network in networks.items():
network['node_table'].to_perseus(path.join(folder, '{}_nodes.txt'.format(guid)), main_columns=[])
network['edge_table'].to_perseus(path.join(folder, '{}_edges.txt'.format(guid)), main_columns=[]) | Writing networkTable, nodes and edges to Perseus readable format.
:param folder: Path to output directory.
:param network_table: Network table.
:param networks: Dictionary with node and edge tables, indexed by network guid. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/perseus/network.py#L93-L105 | null | from os import path, makedirs
import uuid
import networkx as nx
from collections import OrderedDict
from perseuspy.io.perseus.matrix import read_perseus
import pandas as pd
import warnings
def read_networks(folder):
"""
Read perseus network collection folder format
>>> network_table, networks = read_networks(folder)
:param folder: Path to network collection
:returns: Network table and dictionary with 'name', 'edge_table', and 'node_table' keys.
"""
network_table = read_perseus(path.join(folder, "networks.txt"))
networks = {}
for name, guid in network_table[['Name', 'GUID']].values:
networks[guid] = {
'name': name,
'guid': guid,
'node_table': read_perseus(path.join(folder, "{}_nodes.txt".format(guid))),
'edge_table': read_perseus(path.join(folder, "{}_edges.txt".format(guid)))
}
return network_table, networks
def from_perseus(network_table, networks):
"""
Create networkx graph from network tables
>>> from perseuspy import read_networks, nx
>>> network_table, networks = read_networks(folder)
>>> graphs = nx.from_perseus(network_table, networks)
"""
graphs = []
for guid, graph_attr in zip(network_table['GUID'], network_table.values):
network = networks[guid]
edge_table = network['edge_table']
if edge_table[['Source', 'Target']].duplicated().any():
warnings.warn('Duplicate edges were found and ignored in network {}'.format(network['name']))
G = nx.from_pandas_edgelist(edge_table, 'Source', 'Target', True, create_using=nx.DiGraph())
for attr, value in zip(network_table.columns, graph_attr):
G.graph[attr] = value
node_table = network['node_table']
if node_table['Node'].duplicated().any():
warnings.warn('Duplicate nodes were found and ignored in network {}'.format(network['name']))
node_column = node_table['Node']
for name, attributes in zip(node_column, node_table.values):
if name not in G:
G.add_node(name)
for attr, value in zip(node_table.columns, attributes):
G.node[name][attr] = value
graphs.append(G)
return graphs
def to_perseus(graphs):
"""
Create a network table and the network dictionary for export to Perseus.
:param graphs: Collection of networkx graphs
>>> from perseuspy import nx
>>> G = nx.random_graphs.barabasi_albert_graph(10, 3)
>>> network_table, networks = nx.to_perseus([G])
"""
graph_attributes = []
networks = {}
for graph in graphs:
attributes = dict(graph.graph)
attributes.update({"Name" : attributes.get("Name", attributes.get("name", "networkx graph")),
"GUID": attributes.get("GUID", str(uuid.uuid4()))})
graph_attributes.append(attributes)
if len(graph) > 0:
edge_table = pd.DataFrame([dict(data, **{"Source": str(f), "Target": str(t)}) for f,t,data in graph.edges(data=True)])
edge_table.columns.name = "Column Name"
node_table = pd.DataFrame([dict(data, **{"Node": str(n)}) for n,data in graph.nodes(data=True)])
node_table.columns.name = "Column Name"
else:
edge_table = pd.DataFrame(columns=pd.Index(['Source', 'Target'], name='Column Name'))
node_table = pd.DataFrame(columns=pd.Index(['Node'], name='Column Name'))
guid = attributes['GUID']
networks[guid] = {
'edge_table': edge_table,
'node_table': node_table,
'name': attributes['Name'],
'guid': guid }
network_table = pd.DataFrame(graph_attributes)
network_table.columns.name = "Column Name"
return network_table, networks
def write_networks(folder, network_table, networks):
"""
Writing networkTable, nodes and edges to Perseus readable format.
:param folder: Path to output directory.
:param network_table: Network table.
:param networks: Dictionary with node and edge tables, indexed by network guid.
"""
makedirs(folder, exist_ok=True)
network_table.to_perseus(path.join(folder, 'networks.txt'), main_columns=[])
for guid, network in networks.items():
network['node_table'].to_perseus(path.join(folder, '{}_nodes.txt'.format(guid)), main_columns=[])
network['edge_table'].to_perseus(path.join(folder, '{}_edges.txt'.format(guid)), main_columns=[])
|
cox-labs/perseuspy | perseuspy/parameters.py | _simple_string_value | python | def _simple_string_value(tree, kind, name):
query = ".//{kind}[@Name='{name}']/Value".format(kind=kind, name=name)
return tree.find(query).text | base function for extracting a simple parameter value.
:param tree: the parameters tree.
:param kind: the xml-tag name of the parameter.
:param name: the name of the parameter.
:returns value: the content of the parameter 'Value' as string. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/parameters.py#L15-L22 | null | """ Perseus parameter parsing
This module contains convenience function for parsing
the Perseus parameters.xml file and extracting parameter values
"""
import xml.etree.ElementTree as ET
def parse_parameters(filename):
""" parse the parameters.xml file.
:param filename: 'parameters.xml'
:returns tree: a 'xml.etree.ElementTree' xml object.
"""
return ET.parse(filename)
def stringParam(parameters, name):
""" string parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return _simple_string_value(parameters, 'StringParam', name)
def fileParam(parameters, name):
""" file parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return _simple_string_value(parameters, 'FileParam', name)
def intParam(parameters, name):
""" integer parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return int(_simple_string_value(parameters, 'IntParam', name))
def boolParam(parameters, name):
""" boolean parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
value = _simple_string_value(parameters, 'BoolParam', name)
if value not in {'true', 'false'}:
raise ValueError('BoolParam Value has to be either "true" or "false", was {}.'.format(value))
return value == 'true'
def doubleParam(parameters, name):
""" double parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return float(_simple_string_value(parameters, 'DoubleParam', name))
def singleChoiceParam(parameters, name, type_converter = str):
""" single choice parameter value. Returns -1 if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceParam[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value
return type_converter(values[value].text)
def multiChoiceParam(parameters, name, type_converter = str):
""" multi choice parameter values.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'
:returns dictionary: value -> values
"""
param = parameters.find(".//MultiChoiceParam[@Name='{name}']".format(name=name))
value = param.find('Value')
values = param.find('Values')
return [type_converter(values[int(item.text)].text) for item in value.findall('Item')]
def singleChoiceWithSubParams(parameters, name, type_converter = str):
""" single choice with sub parameters value and chosen subparameters. Returns -1 and None if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceWithSubParams[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value, None
return type_converter(values[value].text), param.findall('SubParams/Parameters')[value]
def boolWithSubParams(parameters, name):
""" single choice with sub parameters value and chosen subparameters. Returns -1 and None if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
"""
param = parameters.find(".//BoolWithSubParams[@Name='{name}']".format(name=name))
str_value = param.find('Value').text
if str_value not in {'true', 'false'}:
raise ValueError('BoolParamWithSubParams Value has to be either "true" or "false", was {}'.format(str_value))
value = str_value == 'true'
choice = 'SubParamsTrue' if value else 'SubParamsFalse'
return value, param.find('{}/Parameters'.format(choice))
|
cox-labs/perseuspy | perseuspy/parameters.py | boolParam | python | def boolParam(parameters, name):
value = _simple_string_value(parameters, 'BoolParam', name)
if value not in {'true', 'false'}:
raise ValueError('BoolParam Value has to be either "true" or "false", was {}.'.format(value))
return value == 'true' | boolean parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/parameters.py#L42-L49 | [
"def _simple_string_value(tree, kind, name):\n \"\"\" base function for extracting a simple parameter value.\n :param tree: the parameters tree.\n :param kind: the xml-tag name of the parameter.\n :param name: the name of the parameter.\n :returns value: the content of the parameter 'Value' as string... | """ Perseus parameter parsing
This module contains convenience function for parsing
the Perseus parameters.xml file and extracting parameter values
"""
import xml.etree.ElementTree as ET
def parse_parameters(filename):
""" parse the parameters.xml file.
:param filename: 'parameters.xml'
:returns tree: a 'xml.etree.ElementTree' xml object.
"""
return ET.parse(filename)
def _simple_string_value(tree, kind, name):
""" base function for extracting a simple parameter value.
:param tree: the parameters tree.
:param kind: the xml-tag name of the parameter.
:param name: the name of the parameter.
:returns value: the content of the parameter 'Value' as string."""
query = ".//{kind}[@Name='{name}']/Value".format(kind=kind, name=name)
return tree.find(query).text
def stringParam(parameters, name):
""" string parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return _simple_string_value(parameters, 'StringParam', name)
def fileParam(parameters, name):
""" file parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return _simple_string_value(parameters, 'FileParam', name)
def intParam(parameters, name):
""" integer parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return int(_simple_string_value(parameters, 'IntParam', name))
def doubleParam(parameters, name):
""" double parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return float(_simple_string_value(parameters, 'DoubleParam', name))
def singleChoiceParam(parameters, name, type_converter = str):
""" single choice parameter value. Returns -1 if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceParam[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value
return type_converter(values[value].text)
def multiChoiceParam(parameters, name, type_converter = str):
""" multi choice parameter values.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'
:returns dictionary: value -> values
"""
param = parameters.find(".//MultiChoiceParam[@Name='{name}']".format(name=name))
value = param.find('Value')
values = param.find('Values')
return [type_converter(values[int(item.text)].text) for item in value.findall('Item')]
def singleChoiceWithSubParams(parameters, name, type_converter = str):
""" single choice with sub parameters value and chosen subparameters. Returns -1 and None if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceWithSubParams[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value, None
return type_converter(values[value].text), param.findall('SubParams/Parameters')[value]
def boolWithSubParams(parameters, name):
""" single choice with sub parameters value and chosen subparameters. Returns -1 and None if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
"""
param = parameters.find(".//BoolWithSubParams[@Name='{name}']".format(name=name))
str_value = param.find('Value').text
if str_value not in {'true', 'false'}:
raise ValueError('BoolParamWithSubParams Value has to be either "true" or "false", was {}'.format(str_value))
value = str_value == 'true'
choice = 'SubParamsTrue' if value else 'SubParamsFalse'
return value, param.find('{}/Parameters'.format(choice))
|
cox-labs/perseuspy | perseuspy/parameters.py | singleChoiceParam | python | def singleChoiceParam(parameters, name, type_converter = str):
""" single choice parameter value. Returns -1 if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceParam[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value
return type_converter(values[value].text) | single choice parameter value. Returns -1 if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/parameters.py#L57-L67 | null | """ Perseus parameter parsing
This module contains convenience function for parsing
the Perseus parameters.xml file and extracting parameter values
"""
import xml.etree.ElementTree as ET
def parse_parameters(filename):
""" parse the parameters.xml file.
:param filename: 'parameters.xml'
:returns tree: a 'xml.etree.ElementTree' xml object.
"""
return ET.parse(filename)
def _simple_string_value(tree, kind, name):
""" base function for extracting a simple parameter value.
:param tree: the parameters tree.
:param kind: the xml-tag name of the parameter.
:param name: the name of the parameter.
:returns value: the content of the parameter 'Value' as string."""
query = ".//{kind}[@Name='{name}']/Value".format(kind=kind, name=name)
return tree.find(query).text
def stringParam(parameters, name):
""" string parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return _simple_string_value(parameters, 'StringParam', name)
def fileParam(parameters, name):
""" file parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return _simple_string_value(parameters, 'FileParam', name)
def intParam(parameters, name):
""" integer parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return int(_simple_string_value(parameters, 'IntParam', name))
def boolParam(parameters, name):
""" boolean parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
value = _simple_string_value(parameters, 'BoolParam', name)
if value not in {'true', 'false'}:
raise ValueError('BoolParam Value has to be either "true" or "false", was {}.'.format(value))
return value == 'true'
def doubleParam(parameters, name):
""" double parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return float(_simple_string_value(parameters, 'DoubleParam', name))
def multiChoiceParam(parameters, name, type_converter = str):
""" multi choice parameter values.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'
:returns dictionary: value -> values
"""
param = parameters.find(".//MultiChoiceParam[@Name='{name}']".format(name=name))
value = param.find('Value')
values = param.find('Values')
return [type_converter(values[int(item.text)].text) for item in value.findall('Item')]
def singleChoiceWithSubParams(parameters, name, type_converter = str):
""" single choice with sub parameters value and chosen subparameters. Returns -1 and None if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceWithSubParams[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value, None
return type_converter(values[value].text), param.findall('SubParams/Parameters')[value]
def boolWithSubParams(parameters, name):
""" single choice with sub parameters value and chosen subparameters. Returns -1 and None if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
"""
param = parameters.find(".//BoolWithSubParams[@Name='{name}']".format(name=name))
str_value = param.find('Value').text
if str_value not in {'true', 'false'}:
raise ValueError('BoolParamWithSubParams Value has to be either "true" or "false", was {}'.format(str_value))
value = str_value == 'true'
choice = 'SubParamsTrue' if value else 'SubParamsFalse'
return value, param.find('{}/Parameters'.format(choice))
|
cox-labs/perseuspy | perseuspy/parameters.py | multiChoiceParam | python | def multiChoiceParam(parameters, name, type_converter = str):
param = parameters.find(".//MultiChoiceParam[@Name='{name}']".format(name=name))
value = param.find('Value')
values = param.find('Values')
return [type_converter(values[int(item.text)].text) for item in value.findall('Item')] | multi choice parameter values.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'
:returns dictionary: value -> values | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/parameters.py#L69-L79 | null | """ Perseus parameter parsing
This module contains convenience function for parsing
the Perseus parameters.xml file and extracting parameter values
"""
import xml.etree.ElementTree as ET
def parse_parameters(filename):
""" parse the parameters.xml file.
:param filename: 'parameters.xml'
:returns tree: a 'xml.etree.ElementTree' xml object.
"""
return ET.parse(filename)
def _simple_string_value(tree, kind, name):
""" base function for extracting a simple parameter value.
:param tree: the parameters tree.
:param kind: the xml-tag name of the parameter.
:param name: the name of the parameter.
:returns value: the content of the parameter 'Value' as string."""
query = ".//{kind}[@Name='{name}']/Value".format(kind=kind, name=name)
return tree.find(query).text
def stringParam(parameters, name):
""" string parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return _simple_string_value(parameters, 'StringParam', name)
def fileParam(parameters, name):
""" file parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return _simple_string_value(parameters, 'FileParam', name)
def intParam(parameters, name):
""" integer parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return int(_simple_string_value(parameters, 'IntParam', name))
def boolParam(parameters, name):
""" boolean parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
value = _simple_string_value(parameters, 'BoolParam', name)
if value not in {'true', 'false'}:
raise ValueError('BoolParam Value has to be either "true" or "false", was {}.'.format(value))
return value == 'true'
def doubleParam(parameters, name):
""" double parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return float(_simple_string_value(parameters, 'DoubleParam', name))
def singleChoiceParam(parameters, name, type_converter = str):
""" single choice parameter value. Returns -1 if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceParam[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value
return type_converter(values[value].text)
def singleChoiceWithSubParams(parameters, name, type_converter = str):
""" single choice with sub parameters value and chosen subparameters. Returns -1 and None if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceWithSubParams[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value, None
return type_converter(values[value].text), param.findall('SubParams/Parameters')[value]
def boolWithSubParams(parameters, name):
""" single choice with sub parameters value and chosen subparameters. Returns -1 and None if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
"""
param = parameters.find(".//BoolWithSubParams[@Name='{name}']".format(name=name))
str_value = param.find('Value').text
if str_value not in {'true', 'false'}:
raise ValueError('BoolParamWithSubParams Value has to be either "true" or "false", was {}'.format(str_value))
value = str_value == 'true'
choice = 'SubParamsTrue' if value else 'SubParamsFalse'
return value, param.find('{}/Parameters'.format(choice))
|
cox-labs/perseuspy | perseuspy/parameters.py | boolWithSubParams | python | def boolWithSubParams(parameters, name):
param = parameters.find(".//BoolWithSubParams[@Name='{name}']".format(name=name))
str_value = param.find('Value').text
if str_value not in {'true', 'false'}:
raise ValueError('BoolParamWithSubParams Value has to be either "true" or "false", was {}'.format(str_value))
value = str_value == 'true'
choice = 'SubParamsTrue' if value else 'SubParamsFalse'
return value, param.find('{}/Parameters'.format(choice)) | single choice with sub parameters value and chosen subparameters. Returns -1 and None if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/parameters.py#L93-L104 | null | """ Perseus parameter parsing
This module contains convenience function for parsing
the Perseus parameters.xml file and extracting parameter values
"""
import xml.etree.ElementTree as ET
def parse_parameters(filename):
""" parse the parameters.xml file.
:param filename: 'parameters.xml'
:returns tree: a 'xml.etree.ElementTree' xml object.
"""
return ET.parse(filename)
def _simple_string_value(tree, kind, name):
""" base function for extracting a simple parameter value.
:param tree: the parameters tree.
:param kind: the xml-tag name of the parameter.
:param name: the name of the parameter.
:returns value: the content of the parameter 'Value' as string."""
query = ".//{kind}[@Name='{name}']/Value".format(kind=kind, name=name)
return tree.find(query).text
def stringParam(parameters, name):
""" string parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return _simple_string_value(parameters, 'StringParam', name)
def fileParam(parameters, name):
""" file parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return _simple_string_value(parameters, 'FileParam', name)
def intParam(parameters, name):
""" integer parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return int(_simple_string_value(parameters, 'IntParam', name))
def boolParam(parameters, name):
""" boolean parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
value = _simple_string_value(parameters, 'BoolParam', name)
if value not in {'true', 'false'}:
raise ValueError('BoolParam Value has to be either "true" or "false", was {}.'.format(value))
return value == 'true'
def doubleParam(parameters, name):
""" double parameter value.
:param parameters: the parameters tree.
:param name: the name of the parameter. """
return float(_simple_string_value(parameters, 'DoubleParam', name))
def singleChoiceParam(parameters, name, type_converter = str):
""" single choice parameter value. Returns -1 if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceParam[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value
return type_converter(values[value].text)
def multiChoiceParam(parameters, name, type_converter = str):
""" multi choice parameter values.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'
:returns dictionary: value -> values
"""
param = parameters.find(".//MultiChoiceParam[@Name='{name}']".format(name=name))
value = param.find('Value')
values = param.find('Values')
return [type_converter(values[int(item.text)].text) for item in value.findall('Item')]
def singleChoiceWithSubParams(parameters, name, type_converter = str):
""" single choice with sub parameters value and chosen subparameters. Returns -1 and None if no value was chosen.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'"""
param = parameters.find(".//SingleChoiceWithSubParams[@Name='{name}']".format(name=name))
value = int(param.find('Value').text)
values = param.find('Values')
if value < 0:
return value, None
return type_converter(values[value].text), param.findall('SubParams/Parameters')[value]
|
cox-labs/perseuspy | perseuspy/io/maxquant.py | read_rawFilesTable | python | def read_rawFilesTable(filename):
exp = pd.read_table(filename)
expected_columns = {'File', 'Exists', 'Size', 'Data format', 'Parameter group', 'Experiment', 'Fraction'}
found_columns = set(exp.columns)
if len(expected_columns - found_columns) > 0:
message = '\n'.join(['The raw files table has the wrong format!',
'It should contain columns:',
', '.join(sorted(expected_columns)),
'Found columns:',
', '.join(sorted(found_columns))])
raise ValueError(message)
exp['Raw file'] = exp['File'].apply(path.basename).apply(path.splitext).str.get(0)
exp['Experiment'] = exp['Experiment'].astype(str)
return exp | parse the 'rawFilesTable.txt' file into a pandas dataframe | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/maxquant.py#L8-L22 | null | """
utility functions for parsing various generic output tables
of MaxQuant/Perseus
"""
import pandas as pd
from os import path
|
cox-labs/perseuspy | perseuspy/dependent_peptides.py | read_dependent_peptides | python | def read_dependent_peptides(filename):
df = (pd.read_perseus(filename, usecols=_cols)
.dropna(subset=['DP Ratio mod/base']))
df['DP Ratio mod/base'] = df['DP Ratio mod/base'].astype(float)
dep = df.pivot_table('DP Ratio mod/base', index=_index_columns,
columns='Raw file', aggfunc=np.median)
localization = _count_localizations(df)
return dep, localization | read the dependent peptides table and extract localiztion information
:param filename: path to the 'allPeptides.txt' table.
:returns dep, localization: the dependent peptide table, localization information. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/dependent_peptides.py#L16-L27 | [
"def _count_localizations(df):\n \"\"\" count the most likely localization for each depentent peptide.\n :param df: allPeptides.txt table.\n \"\"\"\n grp = df.groupby(_index_columns)\n counts = grp['DP AA'].apply(lambda x: count(x.str.split(';').values))\n counts.index = counts.index.set_names('DP... | """
Dependent peptides can be extracted from the `allPeptides.txt` table
and are annotated using the `experimentalDesign.txt`.
This code forms the basis for the corresponding Perseus plugin PluginDependentPeptides.
"""
import pandas as pd
from perseuspy.io.perseus.matrix import read_perseus
pd.read_perseus = read_perseus
from perseuspy.io.maxquant import read_rawFilesTable
from perseuspy.parameters import fileParam, parse_parameters
import numpy as np
_index_columns = ['DP Proteins', 'DP Base Sequence', 'DP Cluster Index', 'DP Modification']
_cols = ['DP Ratio mod/base', 'Raw file', 'DP AA'] + _index_columns
def _set_column_names(dep, exp):
""" rename the columns in the dependent peptides table from
the raw file to the corresponding {experiment}_{fraction}.
:param dep: dependent peptides table.
:param exp: experimental design table.
"""
colnames = exp['Experiment'].astype(str) + '_' + exp['Fraction'].astype(str)
file2col = dict(zip(exp['Raw file'], colnames))
_dep = dep.rename(columns=file2col)
_dep.columns.name = 'Column Name'
return _dep
from collections import defaultdict
def count(args):
""" count occurences in a list of lists
>>> count([['a','b'],['a']])
defaultdict(int, {'a' : 2, 'b' : 1})
"""
counts = defaultdict(int)
for arg in args:
for item in arg:
counts[item] = counts[item] + 1
return counts
def _count_localizations(df):
""" count the most likely localization for each depentent peptide.
:param df: allPeptides.txt table.
"""
grp = df.groupby(_index_columns)
counts = grp['DP AA'].apply(lambda x: count(x.str.split(';').values))
counts.index = counts.index.set_names('DP AA', level=4)
counts.name = 'DP AA count'
best_localization = counts.reset_index().groupby(_index_columns).apply(_frequent_localizations)
return best_localization
def _frequent_localizations(df):
""" returns the most frequent localization for any dependent peptide.
In case of ties, preference is given to n-terminal modification which are
biologically more likely to occur
:param df: allPeptides.txt table.
"""
max_count = int(df['DP AA count'].max())
max_aa = set(df[df['DP AA count'] == max_count]['DP AA'].unique())
result = {'DP AA max count' : max_count}
if 'nterm' in max_aa:
result['DP AA'] = 'nterm'
else:
result['DP AA'] = ';'.join(sorted(max_aa))
return pd.Series(result)
def run_dependent_peptides_from_parameters(paramfile, outfile):
""" transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param paramfile: Perseus parameters.xml including at least two FileParam
entries names 'allPeptides.txt' and 'experimentalDesign.txt'.
:param outfile: Path to the output file.
"""
parameters = parse_parameters(paramfile)
allPeptides_file = fileParam(parameters, 'allPeptides.txt')
rawFilesTable_file = fileParam(parameters, 'Raw files table')
run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile)
def run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile):
""" transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param allPeptides_file: MaxQuant 'allPeptides.txt' output table.
:param rawFilesTable_file: MaxQuant 'Raw files'-tab table.
:param outfile: Path to the output file.
"""
__dep, localization = read_dependent_peptides(allPeptides_file)
exp = read_rawFilesTable(rawFilesTable_file)
_dep = _set_column_names(__dep, exp)
main_columns = list(_dep.columns)
dep = _dep.join(localization).reset_index()
dep.to_perseus(outfile, main_columns=main_columns)
|
cox-labs/perseuspy | perseuspy/dependent_peptides.py | _set_column_names | python | def _set_column_names(dep, exp):
colnames = exp['Experiment'].astype(str) + '_' + exp['Fraction'].astype(str)
file2col = dict(zip(exp['Raw file'], colnames))
_dep = dep.rename(columns=file2col)
_dep.columns.name = 'Column Name'
return _dep | rename the columns in the dependent peptides table from
the raw file to the corresponding {experiment}_{fraction}.
:param dep: dependent peptides table.
:param exp: experimental design table. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/dependent_peptides.py#L29-L39 | null | """
Dependent peptides can be extracted from the `allPeptides.txt` table
and are annotated using the `experimentalDesign.txt`.
This code forms the basis for the corresponding Perseus plugin PluginDependentPeptides.
"""
import pandas as pd
from perseuspy.io.perseus.matrix import read_perseus
pd.read_perseus = read_perseus
from perseuspy.io.maxquant import read_rawFilesTable
from perseuspy.parameters import fileParam, parse_parameters
import numpy as np
_index_columns = ['DP Proteins', 'DP Base Sequence', 'DP Cluster Index', 'DP Modification']
_cols = ['DP Ratio mod/base', 'Raw file', 'DP AA'] + _index_columns
def read_dependent_peptides(filename):
""" read the dependent peptides table and extract localiztion information
:param filename: path to the 'allPeptides.txt' table.
:returns dep, localization: the dependent peptide table, localization information.
"""
df = (pd.read_perseus(filename, usecols=_cols)
.dropna(subset=['DP Ratio mod/base']))
df['DP Ratio mod/base'] = df['DP Ratio mod/base'].astype(float)
dep = df.pivot_table('DP Ratio mod/base', index=_index_columns,
columns='Raw file', aggfunc=np.median)
localization = _count_localizations(df)
return dep, localization
from collections import defaultdict
def count(args):
""" count occurences in a list of lists
>>> count([['a','b'],['a']])
defaultdict(int, {'a' : 2, 'b' : 1})
"""
counts = defaultdict(int)
for arg in args:
for item in arg:
counts[item] = counts[item] + 1
return counts
def _count_localizations(df):
""" count the most likely localization for each depentent peptide.
:param df: allPeptides.txt table.
"""
grp = df.groupby(_index_columns)
counts = grp['DP AA'].apply(lambda x: count(x.str.split(';').values))
counts.index = counts.index.set_names('DP AA', level=4)
counts.name = 'DP AA count'
best_localization = counts.reset_index().groupby(_index_columns).apply(_frequent_localizations)
return best_localization
def _frequent_localizations(df):
""" returns the most frequent localization for any dependent peptide.
In case of ties, preference is given to n-terminal modification which are
biologically more likely to occur
:param df: allPeptides.txt table.
"""
max_count = int(df['DP AA count'].max())
max_aa = set(df[df['DP AA count'] == max_count]['DP AA'].unique())
result = {'DP AA max count' : max_count}
if 'nterm' in max_aa:
result['DP AA'] = 'nterm'
else:
result['DP AA'] = ';'.join(sorted(max_aa))
return pd.Series(result)
def run_dependent_peptides_from_parameters(paramfile, outfile):
""" transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param paramfile: Perseus parameters.xml including at least two FileParam
entries names 'allPeptides.txt' and 'experimentalDesign.txt'.
:param outfile: Path to the output file.
"""
parameters = parse_parameters(paramfile)
allPeptides_file = fileParam(parameters, 'allPeptides.txt')
rawFilesTable_file = fileParam(parameters, 'Raw files table')
run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile)
def run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile):
""" transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param allPeptides_file: MaxQuant 'allPeptides.txt' output table.
:param rawFilesTable_file: MaxQuant 'Raw files'-tab table.
:param outfile: Path to the output file.
"""
__dep, localization = read_dependent_peptides(allPeptides_file)
exp = read_rawFilesTable(rawFilesTable_file)
_dep = _set_column_names(__dep, exp)
main_columns = list(_dep.columns)
dep = _dep.join(localization).reset_index()
dep.to_perseus(outfile, main_columns=main_columns)
|
cox-labs/perseuspy | perseuspy/dependent_peptides.py | count | python | def count(args):
counts = defaultdict(int)
for arg in args:
for item in arg:
counts[item] = counts[item] + 1
return counts | count occurences in a list of lists
>>> count([['a','b'],['a']])
defaultdict(int, {'a' : 2, 'b' : 1}) | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/dependent_peptides.py#L42-L51 | null | """
Dependent peptides can be extracted from the `allPeptides.txt` table
and are annotated using the `experimentalDesign.txt`.
This code forms the basis for the corresponding Perseus plugin PluginDependentPeptides.
"""
import pandas as pd
from perseuspy.io.perseus.matrix import read_perseus
pd.read_perseus = read_perseus
from perseuspy.io.maxquant import read_rawFilesTable
from perseuspy.parameters import fileParam, parse_parameters
import numpy as np
_index_columns = ['DP Proteins', 'DP Base Sequence', 'DP Cluster Index', 'DP Modification']
_cols = ['DP Ratio mod/base', 'Raw file', 'DP AA'] + _index_columns
def read_dependent_peptides(filename):
""" read the dependent peptides table and extract localiztion information
:param filename: path to the 'allPeptides.txt' table.
:returns dep, localization: the dependent peptide table, localization information.
"""
df = (pd.read_perseus(filename, usecols=_cols)
.dropna(subset=['DP Ratio mod/base']))
df['DP Ratio mod/base'] = df['DP Ratio mod/base'].astype(float)
dep = df.pivot_table('DP Ratio mod/base', index=_index_columns,
columns='Raw file', aggfunc=np.median)
localization = _count_localizations(df)
return dep, localization
def _set_column_names(dep, exp):
""" rename the columns in the dependent peptides table from
the raw file to the corresponding {experiment}_{fraction}.
:param dep: dependent peptides table.
:param exp: experimental design table.
"""
colnames = exp['Experiment'].astype(str) + '_' + exp['Fraction'].astype(str)
file2col = dict(zip(exp['Raw file'], colnames))
_dep = dep.rename(columns=file2col)
_dep.columns.name = 'Column Name'
return _dep
from collections import defaultdict
def _count_localizations(df):
""" count the most likely localization for each depentent peptide.
:param df: allPeptides.txt table.
"""
grp = df.groupby(_index_columns)
counts = grp['DP AA'].apply(lambda x: count(x.str.split(';').values))
counts.index = counts.index.set_names('DP AA', level=4)
counts.name = 'DP AA count'
best_localization = counts.reset_index().groupby(_index_columns).apply(_frequent_localizations)
return best_localization
def _frequent_localizations(df):
""" returns the most frequent localization for any dependent peptide.
In case of ties, preference is given to n-terminal modification which are
biologically more likely to occur
:param df: allPeptides.txt table.
"""
max_count = int(df['DP AA count'].max())
max_aa = set(df[df['DP AA count'] == max_count]['DP AA'].unique())
result = {'DP AA max count' : max_count}
if 'nterm' in max_aa:
result['DP AA'] = 'nterm'
else:
result['DP AA'] = ';'.join(sorted(max_aa))
return pd.Series(result)
def run_dependent_peptides_from_parameters(paramfile, outfile):
""" transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param paramfile: Perseus parameters.xml including at least two FileParam
entries names 'allPeptides.txt' and 'experimentalDesign.txt'.
:param outfile: Path to the output file.
"""
parameters = parse_parameters(paramfile)
allPeptides_file = fileParam(parameters, 'allPeptides.txt')
rawFilesTable_file = fileParam(parameters, 'Raw files table')
run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile)
def run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile):
""" transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param allPeptides_file: MaxQuant 'allPeptides.txt' output table.
:param rawFilesTable_file: MaxQuant 'Raw files'-tab table.
:param outfile: Path to the output file.
"""
__dep, localization = read_dependent_peptides(allPeptides_file)
exp = read_rawFilesTable(rawFilesTable_file)
_dep = _set_column_names(__dep, exp)
main_columns = list(_dep.columns)
dep = _dep.join(localization).reset_index()
dep.to_perseus(outfile, main_columns=main_columns)
|
cox-labs/perseuspy | perseuspy/dependent_peptides.py | _count_localizations | python | def _count_localizations(df):
grp = df.groupby(_index_columns)
counts = grp['DP AA'].apply(lambda x: count(x.str.split(';').values))
counts.index = counts.index.set_names('DP AA', level=4)
counts.name = 'DP AA count'
best_localization = counts.reset_index().groupby(_index_columns).apply(_frequent_localizations)
return best_localization | count the most likely localization for each depentent peptide.
:param df: allPeptides.txt table. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/dependent_peptides.py#L53-L62 | null | """
Dependent peptides can be extracted from the `allPeptides.txt` table
and are annotated using the `experimentalDesign.txt`.
This code forms the basis for the corresponding Perseus plugin PluginDependentPeptides.
"""
import pandas as pd
from perseuspy.io.perseus.matrix import read_perseus
pd.read_perseus = read_perseus
from perseuspy.io.maxquant import read_rawFilesTable
from perseuspy.parameters import fileParam, parse_parameters
import numpy as np
_index_columns = ['DP Proteins', 'DP Base Sequence', 'DP Cluster Index', 'DP Modification']
_cols = ['DP Ratio mod/base', 'Raw file', 'DP AA'] + _index_columns
def read_dependent_peptides(filename):
""" read the dependent peptides table and extract localiztion information
:param filename: path to the 'allPeptides.txt' table.
:returns dep, localization: the dependent peptide table, localization information.
"""
df = (pd.read_perseus(filename, usecols=_cols)
.dropna(subset=['DP Ratio mod/base']))
df['DP Ratio mod/base'] = df['DP Ratio mod/base'].astype(float)
dep = df.pivot_table('DP Ratio mod/base', index=_index_columns,
columns='Raw file', aggfunc=np.median)
localization = _count_localizations(df)
return dep, localization
def _set_column_names(dep, exp):
""" rename the columns in the dependent peptides table from
the raw file to the corresponding {experiment}_{fraction}.
:param dep: dependent peptides table.
:param exp: experimental design table.
"""
colnames = exp['Experiment'].astype(str) + '_' + exp['Fraction'].astype(str)
file2col = dict(zip(exp['Raw file'], colnames))
_dep = dep.rename(columns=file2col)
_dep.columns.name = 'Column Name'
return _dep
from collections import defaultdict
def count(args):
""" count occurences in a list of lists
>>> count([['a','b'],['a']])
defaultdict(int, {'a' : 2, 'b' : 1})
"""
counts = defaultdict(int)
for arg in args:
for item in arg:
counts[item] = counts[item] + 1
return counts
def _frequent_localizations(df):
""" returns the most frequent localization for any dependent peptide.
In case of ties, preference is given to n-terminal modification which are
biologically more likely to occur
:param df: allPeptides.txt table.
"""
max_count = int(df['DP AA count'].max())
max_aa = set(df[df['DP AA count'] == max_count]['DP AA'].unique())
result = {'DP AA max count' : max_count}
if 'nterm' in max_aa:
result['DP AA'] = 'nterm'
else:
result['DP AA'] = ';'.join(sorted(max_aa))
return pd.Series(result)
def run_dependent_peptides_from_parameters(paramfile, outfile):
""" transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param paramfile: Perseus parameters.xml including at least two FileParam
entries names 'allPeptides.txt' and 'experimentalDesign.txt'.
:param outfile: Path to the output file.
"""
parameters = parse_parameters(paramfile)
allPeptides_file = fileParam(parameters, 'allPeptides.txt')
rawFilesTable_file = fileParam(parameters, 'Raw files table')
run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile)
def run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile):
""" transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param allPeptides_file: MaxQuant 'allPeptides.txt' output table.
:param rawFilesTable_file: MaxQuant 'Raw files'-tab table.
:param outfile: Path to the output file.
"""
__dep, localization = read_dependent_peptides(allPeptides_file)
exp = read_rawFilesTable(rawFilesTable_file)
_dep = _set_column_names(__dep, exp)
main_columns = list(_dep.columns)
dep = _dep.join(localization).reset_index()
dep.to_perseus(outfile, main_columns=main_columns)
|
cox-labs/perseuspy | perseuspy/dependent_peptides.py | _frequent_localizations | python | def _frequent_localizations(df):
max_count = int(df['DP AA count'].max())
max_aa = set(df[df['DP AA count'] == max_count]['DP AA'].unique())
result = {'DP AA max count' : max_count}
if 'nterm' in max_aa:
result['DP AA'] = 'nterm'
else:
result['DP AA'] = ';'.join(sorted(max_aa))
return pd.Series(result) | returns the most frequent localization for any dependent peptide.
In case of ties, preference is given to n-terminal modification which are
biologically more likely to occur
:param df: allPeptides.txt table. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/dependent_peptides.py#L64-L77 | null | """
Dependent peptides can be extracted from the `allPeptides.txt` table
and are annotated using the `experimentalDesign.txt`.
This code forms the basis for the corresponding Perseus plugin PluginDependentPeptides.
"""
import pandas as pd
from perseuspy.io.perseus.matrix import read_perseus
pd.read_perseus = read_perseus
from perseuspy.io.maxquant import read_rawFilesTable
from perseuspy.parameters import fileParam, parse_parameters
import numpy as np
_index_columns = ['DP Proteins', 'DP Base Sequence', 'DP Cluster Index', 'DP Modification']
_cols = ['DP Ratio mod/base', 'Raw file', 'DP AA'] + _index_columns
def read_dependent_peptides(filename):
""" read the dependent peptides table and extract localiztion information
:param filename: path to the 'allPeptides.txt' table.
:returns dep, localization: the dependent peptide table, localization information.
"""
df = (pd.read_perseus(filename, usecols=_cols)
.dropna(subset=['DP Ratio mod/base']))
df['DP Ratio mod/base'] = df['DP Ratio mod/base'].astype(float)
dep = df.pivot_table('DP Ratio mod/base', index=_index_columns,
columns='Raw file', aggfunc=np.median)
localization = _count_localizations(df)
return dep, localization
def _set_column_names(dep, exp):
""" rename the columns in the dependent peptides table from
the raw file to the corresponding {experiment}_{fraction}.
:param dep: dependent peptides table.
:param exp: experimental design table.
"""
colnames = exp['Experiment'].astype(str) + '_' + exp['Fraction'].astype(str)
file2col = dict(zip(exp['Raw file'], colnames))
_dep = dep.rename(columns=file2col)
_dep.columns.name = 'Column Name'
return _dep
from collections import defaultdict
def count(args):
""" count occurences in a list of lists
>>> count([['a','b'],['a']])
defaultdict(int, {'a' : 2, 'b' : 1})
"""
counts = defaultdict(int)
for arg in args:
for item in arg:
counts[item] = counts[item] + 1
return counts
def _count_localizations(df):
""" count the most likely localization for each depentent peptide.
:param df: allPeptides.txt table.
"""
grp = df.groupby(_index_columns)
counts = grp['DP AA'].apply(lambda x: count(x.str.split(';').values))
counts.index = counts.index.set_names('DP AA', level=4)
counts.name = 'DP AA count'
best_localization = counts.reset_index().groupby(_index_columns).apply(_frequent_localizations)
return best_localization
def run_dependent_peptides_from_parameters(paramfile, outfile):
""" transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param paramfile: Perseus parameters.xml including at least two FileParam
entries names 'allPeptides.txt' and 'experimentalDesign.txt'.
:param outfile: Path to the output file.
"""
parameters = parse_parameters(paramfile)
allPeptides_file = fileParam(parameters, 'allPeptides.txt')
rawFilesTable_file = fileParam(parameters, 'Raw files table')
run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile)
def run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile):
""" transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param allPeptides_file: MaxQuant 'allPeptides.txt' output table.
:param rawFilesTable_file: MaxQuant 'Raw files'-tab table.
:param outfile: Path to the output file.
"""
__dep, localization = read_dependent_peptides(allPeptides_file)
exp = read_rawFilesTable(rawFilesTable_file)
_dep = _set_column_names(__dep, exp)
main_columns = list(_dep.columns)
dep = _dep.join(localization).reset_index()
dep.to_perseus(outfile, main_columns=main_columns)
|
cox-labs/perseuspy | perseuspy/dependent_peptides.py | run_dependent_peptides_from_parameters | python | def run_dependent_peptides_from_parameters(paramfile, outfile):
parameters = parse_parameters(paramfile)
allPeptides_file = fileParam(parameters, 'allPeptides.txt')
rawFilesTable_file = fileParam(parameters, 'Raw files table')
run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile) | transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param paramfile: Perseus parameters.xml including at least two FileParam
entries names 'allPeptides.txt' and 'experimentalDesign.txt'.
:param outfile: Path to the output file. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/dependent_peptides.py#L79-L89 | [
"def fileParam(parameters, name):\n \"\"\" file parameter value.\n :param parameters: the parameters tree.\n :param name: the name of the parameter. \"\"\"\n return _simple_string_value(parameters, 'FileParam', name)\n",
"def parse_parameters(filename):\n \"\"\" parse the parameters.xml file.\n ... | """
Dependent peptides can be extracted from the `allPeptides.txt` table
and are annotated using the `experimentalDesign.txt`.
This code forms the basis for the corresponding Perseus plugin PluginDependentPeptides.
"""
import pandas as pd
from perseuspy.io.perseus.matrix import read_perseus
pd.read_perseus = read_perseus
from perseuspy.io.maxquant import read_rawFilesTable
from perseuspy.parameters import fileParam, parse_parameters
import numpy as np
_index_columns = ['DP Proteins', 'DP Base Sequence', 'DP Cluster Index', 'DP Modification']
_cols = ['DP Ratio mod/base', 'Raw file', 'DP AA'] + _index_columns
def read_dependent_peptides(filename):
""" read the dependent peptides table and extract localiztion information
:param filename: path to the 'allPeptides.txt' table.
:returns dep, localization: the dependent peptide table, localization information.
"""
df = (pd.read_perseus(filename, usecols=_cols)
.dropna(subset=['DP Ratio mod/base']))
df['DP Ratio mod/base'] = df['DP Ratio mod/base'].astype(float)
dep = df.pivot_table('DP Ratio mod/base', index=_index_columns,
columns='Raw file', aggfunc=np.median)
localization = _count_localizations(df)
return dep, localization
def _set_column_names(dep, exp):
""" rename the columns in the dependent peptides table from
the raw file to the corresponding {experiment}_{fraction}.
:param dep: dependent peptides table.
:param exp: experimental design table.
"""
colnames = exp['Experiment'].astype(str) + '_' + exp['Fraction'].astype(str)
file2col = dict(zip(exp['Raw file'], colnames))
_dep = dep.rename(columns=file2col)
_dep.columns.name = 'Column Name'
return _dep
from collections import defaultdict
def count(args):
""" count occurences in a list of lists
>>> count([['a','b'],['a']])
defaultdict(int, {'a' : 2, 'b' : 1})
"""
counts = defaultdict(int)
for arg in args:
for item in arg:
counts[item] = counts[item] + 1
return counts
def _count_localizations(df):
""" count the most likely localization for each depentent peptide.
:param df: allPeptides.txt table.
"""
grp = df.groupby(_index_columns)
counts = grp['DP AA'].apply(lambda x: count(x.str.split(';').values))
counts.index = counts.index.set_names('DP AA', level=4)
counts.name = 'DP AA count'
best_localization = counts.reset_index().groupby(_index_columns).apply(_frequent_localizations)
return best_localization
def _frequent_localizations(df):
""" returns the most frequent localization for any dependent peptide.
In case of ties, preference is given to n-terminal modification which are
biologically more likely to occur
:param df: allPeptides.txt table.
"""
max_count = int(df['DP AA count'].max())
max_aa = set(df[df['DP AA count'] == max_count]['DP AA'].unique())
result = {'DP AA max count' : max_count}
if 'nterm' in max_aa:
result['DP AA'] = 'nterm'
else:
result['DP AA'] = ';'.join(sorted(max_aa))
return pd.Series(result)
def run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile):
""" transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param allPeptides_file: MaxQuant 'allPeptides.txt' output table.
:param rawFilesTable_file: MaxQuant 'Raw files'-tab table.
:param outfile: Path to the output file.
"""
__dep, localization = read_dependent_peptides(allPeptides_file)
exp = read_rawFilesTable(rawFilesTable_file)
_dep = _set_column_names(__dep, exp)
main_columns = list(_dep.columns)
dep = _dep.join(localization).reset_index()
dep.to_perseus(outfile, main_columns=main_columns)
|
cox-labs/perseuspy | perseuspy/dependent_peptides.py | run_dependent_peptides | python | def run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile):
__dep, localization = read_dependent_peptides(allPeptides_file)
exp = read_rawFilesTable(rawFilesTable_file)
_dep = _set_column_names(__dep, exp)
main_columns = list(_dep.columns)
dep = _dep.join(localization).reset_index()
dep.to_perseus(outfile, main_columns=main_columns) | transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param allPeptides_file: MaxQuant 'allPeptides.txt' output table.
:param rawFilesTable_file: MaxQuant 'Raw files'-tab table.
:param outfile: Path to the output file. | train | https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/dependent_peptides.py#L91-L103 | [
"def read_rawFilesTable(filename):\n \"\"\"parse the 'rawFilesTable.txt' file into a pandas dataframe\"\"\"\n exp = pd.read_table(filename)\n expected_columns = {'File', 'Exists', 'Size', 'Data format', 'Parameter group', 'Experiment', 'Fraction'}\n found_columns = set(exp.columns)\n if len(expected_... | """
Dependent peptides can be extracted from the `allPeptides.txt` table
and are annotated using the `experimentalDesign.txt`.
This code forms the basis for the corresponding Perseus plugin PluginDependentPeptides.
"""
import pandas as pd
from perseuspy.io.perseus.matrix import read_perseus
pd.read_perseus = read_perseus
from perseuspy.io.maxquant import read_rawFilesTable
from perseuspy.parameters import fileParam, parse_parameters
import numpy as np
_index_columns = ['DP Proteins', 'DP Base Sequence', 'DP Cluster Index', 'DP Modification']
_cols = ['DP Ratio mod/base', 'Raw file', 'DP AA'] + _index_columns
def read_dependent_peptides(filename):
""" read the dependent peptides table and extract localiztion information
:param filename: path to the 'allPeptides.txt' table.
:returns dep, localization: the dependent peptide table, localization information.
"""
df = (pd.read_perseus(filename, usecols=_cols)
.dropna(subset=['DP Ratio mod/base']))
df['DP Ratio mod/base'] = df['DP Ratio mod/base'].astype(float)
dep = df.pivot_table('DP Ratio mod/base', index=_index_columns,
columns='Raw file', aggfunc=np.median)
localization = _count_localizations(df)
return dep, localization
def _set_column_names(dep, exp):
""" rename the columns in the dependent peptides table from
the raw file to the corresponding {experiment}_{fraction}.
:param dep: dependent peptides table.
:param exp: experimental design table.
"""
colnames = exp['Experiment'].astype(str) + '_' + exp['Fraction'].astype(str)
file2col = dict(zip(exp['Raw file'], colnames))
_dep = dep.rename(columns=file2col)
_dep.columns.name = 'Column Name'
return _dep
from collections import defaultdict
def count(args):
""" count occurences in a list of lists
>>> count([['a','b'],['a']])
defaultdict(int, {'a' : 2, 'b' : 1})
"""
counts = defaultdict(int)
for arg in args:
for item in arg:
counts[item] = counts[item] + 1
return counts
def _count_localizations(df):
""" count the most likely localization for each depentent peptide.
:param df: allPeptides.txt table.
"""
grp = df.groupby(_index_columns)
counts = grp['DP AA'].apply(lambda x: count(x.str.split(';').values))
counts.index = counts.index.set_names('DP AA', level=4)
counts.name = 'DP AA count'
best_localization = counts.reset_index().groupby(_index_columns).apply(_frequent_localizations)
return best_localization
def _frequent_localizations(df):
""" returns the most frequent localization for any dependent peptide.
In case of ties, preference is given to n-terminal modification which are
biologically more likely to occur
:param df: allPeptides.txt table.
"""
max_count = int(df['DP AA count'].max())
max_aa = set(df[df['DP AA count'] == max_count]['DP AA'].unique())
result = {'DP AA max count' : max_count}
if 'nterm' in max_aa:
result['DP AA'] = 'nterm'
else:
result['DP AA'] = ';'.join(sorted(max_aa))
return pd.Series(result)
def run_dependent_peptides_from_parameters(paramfile, outfile):
""" transform a allPeptides.txt and experimentalDesign.txt table
into the dependentPeptides.txt table written in outfile.
:param paramfile: Perseus parameters.xml including at least two FileParam
entries names 'allPeptides.txt' and 'experimentalDesign.txt'.
:param outfile: Path to the output file.
"""
parameters = parse_parameters(paramfile)
allPeptides_file = fileParam(parameters, 'allPeptides.txt')
rawFilesTable_file = fileParam(parameters, 'Raw files table')
run_dependent_peptides(allPeptides_file, rawFilesTable_file, outfile)
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/git_tools.py | checkout_and_create_branch | python | def checkout_and_create_branch(repo, name):
local_branch = repo.branches[name] if name in repo.branches else None
if not local_branch:
if name in repo.remotes.origin.refs:
# If origin branch exists but not local, git.checkout is the fatest way
# to create local branch with origin link automatically
msg = repo.git.checkout(name)
_LOGGER.debug(msg)
return
# Create local branch, will be link to origin later
local_branch = repo.create_head(name)
local_branch.checkout() | Checkout branch. Create it if necessary | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/git_tools.py#L9-L21 | null | """Pure git tools for managing local folder Git.
"""
import logging
from git import Repo, GitCommandError
_LOGGER = logging.getLogger(__name__)
def checkout_create_push_branch(repo, name):
"""Checkout this branch. Create it if necessary, and push it to origin.
"""
try:
repo.git.checkout(name)
_LOGGER.info("Checkout %s success", name)
except GitCommandError:
_LOGGER.info("Checkout %s was impossible (branch does not exist). Creating it and push it.", name)
checkout_and_create_branch(repo, name)
repo.git.push('origin', name, set_upstream=True)
def do_commit(repo, message_template, branch_name, hexsha):
"Do a commit if modified/untracked files"
repo.git.add(repo.working_tree_dir)
if not repo.git.diff(staged=True):
_LOGGER.warning('No modified files in this Autorest run')
return False
checkout_and_create_branch(repo, branch_name)
msg = message_template.format(hexsha=hexsha)
commit = repo.index.commit(msg)
_LOGGER.info("Commit done: %s", msg)
return commit.hexsha
def get_repo_hexsha(git_folder):
"""Get the SHA1 of the current repo"""
repo = Repo(str(git_folder))
if repo.bare:
not_git_hexsha = "notgitrepo"
_LOGGER.warning("Not a git repo, SHA1 used will be: %s", not_git_hexsha)
return not_git_hexsha
hexsha = repo.head.commit.hexsha
_LOGGER.info("Found REST API repo SHA1: %s", hexsha)
return hexsha
def checkout_with_fetch(git_folder, refspec, repository="origin"):
"""Fetch the refspec, and checkout FETCH_HEAD.
Beware that you will ne in detached head mode.
"""
_LOGGER.info("Trying to fetch and checkout %s", refspec)
repo = Repo(str(git_folder))
repo.git.fetch(repository, refspec) # FETCH_HEAD should be set
repo.git.checkout("FETCH_HEAD")
_LOGGER.info("Fetch and checkout success for %s", refspec)
def clone_to_path(https_authenticated_url, folder, branch_or_commit=None):
"""Clone the given URL to the folder.
:param str branch_or_commit: If specified, switch to this branch. Branch must exist.
"""
_LOGGER.info("Cloning repo")
repo = Repo.clone_from(https_authenticated_url, str(folder))
# Do NOT clone and set branch at the same time, since we allow branch to be a SHA1
# And you can't clone a SHA1
if branch_or_commit:
_LOGGER.info("Checkout branch_or_commit %s", branch_or_commit)
repo.git.checkout(branch_or_commit)
_LOGGER.info("Clone success")
def get_files_in_commit(git_folder, commit_id="HEAD"):
"""List of files in HEAD commit.
"""
repo = Repo(str(git_folder))
output = repo.git.diff("--name-only", commit_id+"^", commit_id)
return output.splitlines()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/git_tools.py | checkout_create_push_branch | python | def checkout_create_push_branch(repo, name):
try:
repo.git.checkout(name)
_LOGGER.info("Checkout %s success", name)
except GitCommandError:
_LOGGER.info("Checkout %s was impossible (branch does not exist). Creating it and push it.", name)
checkout_and_create_branch(repo, name)
repo.git.push('origin', name, set_upstream=True) | Checkout this branch. Create it if necessary, and push it to origin. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/git_tools.py#L23-L32 | [
"def checkout_and_create_branch(repo, name):\n \"\"\"Checkout branch. Create it if necessary\"\"\"\n local_branch = repo.branches[name] if name in repo.branches else None\n if not local_branch:\n if name in repo.remotes.origin.refs:\n # If origin branch exists but not local, git.checkout ... | """Pure git tools for managing local folder Git.
"""
import logging
from git import Repo, GitCommandError
_LOGGER = logging.getLogger(__name__)
def checkout_and_create_branch(repo, name):
"""Checkout branch. Create it if necessary"""
local_branch = repo.branches[name] if name in repo.branches else None
if not local_branch:
if name in repo.remotes.origin.refs:
# If origin branch exists but not local, git.checkout is the fatest way
# to create local branch with origin link automatically
msg = repo.git.checkout(name)
_LOGGER.debug(msg)
return
# Create local branch, will be link to origin later
local_branch = repo.create_head(name)
local_branch.checkout()
def do_commit(repo, message_template, branch_name, hexsha):
"Do a commit if modified/untracked files"
repo.git.add(repo.working_tree_dir)
if not repo.git.diff(staged=True):
_LOGGER.warning('No modified files in this Autorest run')
return False
checkout_and_create_branch(repo, branch_name)
msg = message_template.format(hexsha=hexsha)
commit = repo.index.commit(msg)
_LOGGER.info("Commit done: %s", msg)
return commit.hexsha
def get_repo_hexsha(git_folder):
"""Get the SHA1 of the current repo"""
repo = Repo(str(git_folder))
if repo.bare:
not_git_hexsha = "notgitrepo"
_LOGGER.warning("Not a git repo, SHA1 used will be: %s", not_git_hexsha)
return not_git_hexsha
hexsha = repo.head.commit.hexsha
_LOGGER.info("Found REST API repo SHA1: %s", hexsha)
return hexsha
def checkout_with_fetch(git_folder, refspec, repository="origin"):
"""Fetch the refspec, and checkout FETCH_HEAD.
Beware that you will ne in detached head mode.
"""
_LOGGER.info("Trying to fetch and checkout %s", refspec)
repo = Repo(str(git_folder))
repo.git.fetch(repository, refspec) # FETCH_HEAD should be set
repo.git.checkout("FETCH_HEAD")
_LOGGER.info("Fetch and checkout success for %s", refspec)
def clone_to_path(https_authenticated_url, folder, branch_or_commit=None):
"""Clone the given URL to the folder.
:param str branch_or_commit: If specified, switch to this branch. Branch must exist.
"""
_LOGGER.info("Cloning repo")
repo = Repo.clone_from(https_authenticated_url, str(folder))
# Do NOT clone and set branch at the same time, since we allow branch to be a SHA1
# And you can't clone a SHA1
if branch_or_commit:
_LOGGER.info("Checkout branch_or_commit %s", branch_or_commit)
repo.git.checkout(branch_or_commit)
_LOGGER.info("Clone success")
def get_files_in_commit(git_folder, commit_id="HEAD"):
"""List of files in HEAD commit.
"""
repo = Repo(str(git_folder))
output = repo.git.diff("--name-only", commit_id+"^", commit_id)
return output.splitlines()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/git_tools.py | do_commit | python | def do_commit(repo, message_template, branch_name, hexsha):
"Do a commit if modified/untracked files"
repo.git.add(repo.working_tree_dir)
if not repo.git.diff(staged=True):
_LOGGER.warning('No modified files in this Autorest run')
return False
checkout_and_create_branch(repo, branch_name)
msg = message_template.format(hexsha=hexsha)
commit = repo.index.commit(msg)
_LOGGER.info("Commit done: %s", msg)
return commit.hexsha | Do a commit if modified/untracked files | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/git_tools.py#L35-L47 | [
"def checkout_and_create_branch(repo, name):\n \"\"\"Checkout branch. Create it if necessary\"\"\"\n local_branch = repo.branches[name] if name in repo.branches else None\n if not local_branch:\n if name in repo.remotes.origin.refs:\n # If origin branch exists but not local, git.checkout ... | """Pure git tools for managing local folder Git.
"""
import logging
from git import Repo, GitCommandError
_LOGGER = logging.getLogger(__name__)
def checkout_and_create_branch(repo, name):
"""Checkout branch. Create it if necessary"""
local_branch = repo.branches[name] if name in repo.branches else None
if not local_branch:
if name in repo.remotes.origin.refs:
# If origin branch exists but not local, git.checkout is the fatest way
# to create local branch with origin link automatically
msg = repo.git.checkout(name)
_LOGGER.debug(msg)
return
# Create local branch, will be link to origin later
local_branch = repo.create_head(name)
local_branch.checkout()
def checkout_create_push_branch(repo, name):
"""Checkout this branch. Create it if necessary, and push it to origin.
"""
try:
repo.git.checkout(name)
_LOGGER.info("Checkout %s success", name)
except GitCommandError:
_LOGGER.info("Checkout %s was impossible (branch does not exist). Creating it and push it.", name)
checkout_and_create_branch(repo, name)
repo.git.push('origin', name, set_upstream=True)
def get_repo_hexsha(git_folder):
"""Get the SHA1 of the current repo"""
repo = Repo(str(git_folder))
if repo.bare:
not_git_hexsha = "notgitrepo"
_LOGGER.warning("Not a git repo, SHA1 used will be: %s", not_git_hexsha)
return not_git_hexsha
hexsha = repo.head.commit.hexsha
_LOGGER.info("Found REST API repo SHA1: %s", hexsha)
return hexsha
def checkout_with_fetch(git_folder, refspec, repository="origin"):
"""Fetch the refspec, and checkout FETCH_HEAD.
Beware that you will ne in detached head mode.
"""
_LOGGER.info("Trying to fetch and checkout %s", refspec)
repo = Repo(str(git_folder))
repo.git.fetch(repository, refspec) # FETCH_HEAD should be set
repo.git.checkout("FETCH_HEAD")
_LOGGER.info("Fetch and checkout success for %s", refspec)
def clone_to_path(https_authenticated_url, folder, branch_or_commit=None):
"""Clone the given URL to the folder.
:param str branch_or_commit: If specified, switch to this branch. Branch must exist.
"""
_LOGGER.info("Cloning repo")
repo = Repo.clone_from(https_authenticated_url, str(folder))
# Do NOT clone and set branch at the same time, since we allow branch to be a SHA1
# And you can't clone a SHA1
if branch_or_commit:
_LOGGER.info("Checkout branch_or_commit %s", branch_or_commit)
repo.git.checkout(branch_or_commit)
_LOGGER.info("Clone success")
def get_files_in_commit(git_folder, commit_id="HEAD"):
"""List of files in HEAD commit.
"""
repo = Repo(str(git_folder))
output = repo.git.diff("--name-only", commit_id+"^", commit_id)
return output.splitlines()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.