__author__ = '1'
from model.group import Group
import random
import string
import os.path
import getopt
import sys
import time
import clr
clr.AddReferenceByName('Microsoft.Office.Interop.Excel, Version=12.0.0.0, Culture=neutral, PublicKeyToken=71e9bce111e9429c')
from Microsoft.Office.Interop import Excel
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number=", "file="])
except getopt.GetoptError as err:
print(err)  # report the argument parsing error before exiting
sys.exit(2)
n = 5
f = "data/groups.xlsx"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata =[ Group(name="")] + [
Group(name=random_string("name",10))
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
excel = Excel.ApplicationClass()
excel.Visible = True  # the interop property is named Visible (member lookup is case-sensitive)
workbook = excel.Workbooks.Add()
sheet = workbook.ActiveSheet
for i in range(len(testdata)):
sheet.Range["A%s" % (i+1)].Value2 = testdata[i].name
workbook.SaveAs(file)
time.sleep(10)
excel.Quit() | {
"repo_name": "liliasapurina/ironpython_training",
"path": "generator/group.py",
"copies": "1",
"size": "1230",
"license": "apache-2.0",
"hash": 6179516049224937000,
"line_mean": 20.224137931,
"line_max": 124,
"alpha_frac": 0.6723577236,
"autogenerated": false,
"ratio": 2.9782082324455206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9127992560701461,
"avg_score": 0.004514679068811726,
"num_lines": 58
} |
__author__ = '1'
from pytest_bdd import given, when, then
from model.address import Address
import random
@given('a address list')
def address_list(db):
return db.get_address_list()
@given('a address with <name>,<nickname>,<lastname>,<middlename>')
def new_address(name,nickname,lastname,middlename):
return Address(name=name,nickname=nickname,lastname=lastname,middlename=middlename)
@when('I add the address to the list')
def add_new_address(app, new_address):
app.address.create(new_address)
@then('The new address list is equal to old list with the added address')
def verify_address_added(db, address_list, new_address):
old_addresses = address_list
new_addresses = db.get_address_list()
old_addresses.append(new_address)
assert sorted(old_addresses, key=Address.id_or_max) == sorted(new_addresses, key=Address.id_or_max)
@given('a non-empty address list')
def non_empty_address_list(app, db, new_address):
if len(db.get_address_list()) == 0:
app.address.create(new_address)
return db.get_address_list()
@given('a random address from the list')
def random_address(non_empty_address_list):
return random.choice(non_empty_address_list)
@when('I delete the address from the list')
def delete_address(app, random_address):
app.address.delete_address_by_id(random_address.id)
@then('the new address list is equal old list without the deleted address')
def verify_address_deleted(db, non_empty_address_list, random_address):
old_addresses = non_empty_address_list
new_addresses = db.get_address_list()
assert len(old_addresses) - 1 == len(new_addresses)
old_addresses.remove(random_address)
assert old_addresses == new_addresses
@given('a new values for address fields with <new_name>')
def new_address_values(new_name):
return Address(name=new_name)
@when('I edit the address from the list')
def edit_address_value(app, db, new_address_values, non_empty_address_list):
old_addresses = non_empty_address_list
current_address = new_address_values
address = random.choice(old_addresses)
app.address.edit_address_by_id(address.id, current_address)
@then('the new address list is equal old list with edited feature values')
def verify_address_edited(app, db, non_empty_address_list, check_ui):
old_addresses = non_empty_address_list
assert len(old_addresses) == len(db.get_address_list())
new_addresses = db.get_address_list()
if check_ui:
assert sorted(new_addresses, key = Address.id_or_max) == sorted(app.address.get_address_list(), key = Address.id_or_max)
| {
"repo_name": "liliasapurina/python_training",
"path": "bdd/address_steps.py",
"copies": "1",
"size": "2582",
"license": "apache-2.0",
"hash": -1650964450218694400,
"line_mean": 38.7230769231,
"line_max": 128,
"alpha_frac": 0.7257939582,
"autogenerated": false,
"ratio": 3.3488975356679638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4574691493867964,
"avg_score": null,
"num_lines": null
} |
__author__ = '1'
from pytest_bdd import given, when, then
from model.group import Group
import random
@given('a group list')
def group_list(db):
return db.get_group_list()
@given('a group with <name>, <header> and <footer>')
def new_group(name, header, footer):
return Group(name=name, header=header, footer=footer)
@when('I add the group to the list')
def add_new_group(app, new_group):
app.group.create(new_group)
@then('The new group list is equal to old list with the added group')
def verify_group_added(db, group_list, new_group):
old_groups = group_list
new_groups = db.get_group_list()
old_groups.append(new_group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
@given('a non-empty group list')
def non_empty_group_list(app, db, new_group):
if len(db.get_group_list()) == 0:
app.group.create(new_group)
return db.get_group_list()
@given('a random group from the list')
def random_group(non_empty_group_list):
return random.choice(non_empty_group_list)
@when('I delete the group from the list')
def delete_group(app, random_group):
app.group.delete_group_by_id(random_group.id)
@then('the new group list is equal old list without the deleted group')
def verify_group_deleted(db, non_empty_group_list, random_group):
old_groups = non_empty_group_list
new_groups = db.get_group_list()
assert len(old_groups) - 1 == len(new_groups)
old_groups.remove(random_group)
assert old_groups == new_groups
| {
"repo_name": "liliasapurina/python_training",
"path": "bdd/group_steps.py",
"copies": "1",
"size": "1525",
"license": "apache-2.0",
"hash": -3130263287801531000,
"line_mean": 31.4468085106,
"line_max": 93,
"alpha_frac": 0.6970491803,
"autogenerated": false,
"ratio": 3.0684104627766597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9136955420393105,
"avg_score": 0.025700844536711013,
"num_lines": 47
} |
__author__ = '1'
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.project import ProjectHelper
from fixture.james import JamesHelper
from fixture.signup import SignupHelper
from fixture.mail import MailHelper
from fixture.soap import SoapHelper
class Application:
def __init__(self, browser, config):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.session = SessionHelper(self)
self.project = ProjectHelper(self)
self.james = JamesHelper(self)
self.signup = SignupHelper(self)
self.mail = MailHelper(self)
self.soap = SoapHelper(self)
self.config = config
self.baseUrl = config["web"]["baseUrl"]
def open_home_page(self):
wd = self.wd
wd.get(self.baseUrl)
def destroy(self):
self.wd.quit()
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False | {
"repo_name": "liliasapurina/python_training_mantiss",
"path": "fixture/application.py",
"copies": "1",
"size": "1221",
"license": "apache-2.0",
"hash": 2666932139152073000,
"line_mean": 27.4186046512,
"line_max": 65,
"alpha_frac": 0.6142506143,
"autogenerated": false,
"ratio": 4.153061224489796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5267311838789795,
"avg_score": null,
"num_lines": null
} |
__author__ = '1'
import pytest
import json
import os.path
import importlib
import jsonpickle
from fixture.application import Application
from fixture.db import DbFixture
fixture = None
target = None
def load_config(file):
global target
if target is None:
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),file)
with open(config_file) as f:
target = json.load(f)
return target
@pytest.fixture
def app(request):
global fixture
browser = request.config.getoption("--browser")
web_config = load_config(request.config.getoption("--target"))['web']
if fixture is None or not fixture.is_valid():
fixture = Application(browser = browser, baseUrl = web_config["baseUrl"])
fixture.session.ensure_login(username = web_config["username"],password = web_config["password"])
return fixture
@pytest.fixture(scope = "session")
def db(request):
db_config = load_config(request.config.getoption("--target"))['db']
dbfixture = DbFixture(host=db_config["host"], name=db_config["name"], user=db_config["user"], password=db_config["password"])
def fin():
dbfixture.destroy()
request.addfinalizer(fin)
return dbfixture
@pytest.fixture(scope = "session", autouse = True)
def stop(request):
# session-level teardown: log out and close the browser once the whole test run ends
def fin():
fixture.session.ensure_logout()
fixture.destroy()
request.addfinalizer(fin)
return fixture
@pytest.fixture
def check_ui(request):
return request.config.getoption("--check_ui")
def pytest_addoption(parser):
parser.addoption("--browser",action="store",default="firefox")
parser.addoption("--target",action="store",default="target.json")
parser.addoption("--check_ui",action="store_true")
def pytest_generate_tests(metafunc):
for fixture in metafunc.fixturenames:
if fixture.startswith("data_"):
testdata = load_from_module(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
elif fixture.startswith("json_"):
testdata = load_from_json(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
def load_from_module(module):
return importlib.import_module("data.%s" % module).testdata
def load_from_json(file):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),"data/%s.json" % file)) as f:
return jsonpickle.decode(f.read())
| {
"repo_name": "liliasapurina/python_training",
"path": "conftest.py",
"copies": "1",
"size": "2462",
"license": "apache-2.0",
"hash": -6900977132761336000,
"line_mean": 30.5641025641,
"line_max": 129,
"alpha_frac": 0.6783103168,
"autogenerated": false,
"ratio": 3.6636904761904763,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4842000792990476,
"avg_score": null,
"num_lines": null
} |
__author__ = '1'
class Contact:
def __init__(self, first_name, middle_name, last_name, nickname, photo, title, company, address, phones, emails,
homepage, birthday, anniversary, group, secondary):
self.first_name = first_name
self.middle_name = middle_name
self.last_name = last_name
self.nickname = nickname
self.photo = photo
self.title = title
self.company = company
self.address = address
self.phones = phones
self.emails = emails
self.homepage = homepage
self.birthday = birthday
self.anniversary = anniversary
self.group = group
self.secondary = secondary
class PhoneSection:
def __init__(self, home, mobile, work, fax):
self.home = home
self.mobile = mobile
self.work = work
self.fax = fax
def fill(self, wd):
wd.find_element_by_name("home").send_keys(self.home)
wd.find_element_by_name("mobile").send_keys(self.mobile)
wd.find_element_by_name("work").send_keys(self.work)
wd.find_element_by_name("fax").send_keys(self.fax)
class EmailsSection:
def __init__(self, email1, email2, email3):
self.email1 = email1
self.email2 = email2
self.email3 = email3
def fill(self, wd):
wd.find_element_by_name("email").send_keys(self.email1)
wd.find_element_by_name("email2").send_keys(self.email2)
wd.find_element_by_name("email3").send_keys(self.email3)
class SecondarySection:
def __init__(self, address, home, notes):
self.address = address
self.home = home
self.notes = notes
def fill(self, wd):
wd.find_element_by_name("address2").send_keys(self.address)
wd.find_element_by_name("phone2").send_keys(self.home)
wd.find_element_by_name("notes").send_keys(self.notes)
| {
"repo_name": "Acket74/python_training",
"path": "contact.py",
"copies": "1",
"size": "1900",
"license": "apache-2.0",
"hash": 766454364917166100,
"line_mean": 31.2033898305,
"line_max": 116,
"alpha_frac": 0.6110526316,
"autogenerated": false,
"ratio": 3.4608378870673953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4571890518667395,
"avg_score": null,
"num_lines": null
} |
__author__ = '3ev0'
import argparse
import logging
import sys
import datetime
from dnsmon import service
from dnsmon import webapp
_log = logging.getLogger()
DEF_INTERVAL = 60*24
def main():
argparser = argparse.ArgumentParser(description="Control the dns-monitor services.")
argparser.add_argument("-d", "--debug", action="store_true", help="Enable debug logging")
subparser = argparser.add_subparsers(dest="service", help="Choose a service to control")
ls_parser = subparser.add_parser("monitor", help="The service that monitors the domains.")
ls_parser.add_argument("-i", "--interval", type=int, default=DEF_INTERVAL, help="The interval between lookups (in minutes). Default: {:d}".format(DEF_INTERVAL))
webapp_parser = subparser.add_parser("webapp", help="The web application.")
args = argparser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO,
format="%(asctime)s|%(levelname)s|%(module)s|%(threadName)s|%(message)s")
_log.info("Cli invoked: %s", " ".join(sys.argv))
if args.service == "monitor":
service.configure(num_threads=3, lookup_interval=datetime.timedelta(minutes=args.interval), db_host="localhost", db_port="27017", db_name="dnsmon")
service.run()
elif args.service == "webapp":
webapp.configure(host="0.0.0.0", port="8000")
webapp.run()
else:
_log.error("%s not implemented", args.service)
if __name__ == "__main__":
main() | {
"repo_name": "3ev0/dns-monitor",
"path": "dnsmon/cli.py",
"copies": "1",
"size": "1504",
"license": "apache-2.0",
"hash": 6826737889758097000,
"line_mean": 35.7073170732,
"line_max": 164,
"alpha_frac": 0.6715425532,
"autogenerated": false,
"ratio": 3.659367396593674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4830909949793674,
"avg_score": null,
"num_lines": null
} |
__author__ = '3ev0'
import unittest
from dnsmon import whois
class MyTestCase(unittest.TestCase):
def setUp(self):
self.whoisclient = whois.WhoisClient()
# def test_talk_whois(self):
# resp = self.whoisclient._talk_whois("whois.iana.org", "nu.nl")
# self.assertIn("source:", resp)
# self.assertIn("domain:", resp)
def test_is_ipaddr(self):
self.assertTrue(self.whoisclient._is_ipaddr("1.23.442.11"))
self.assertFalse(self.whoisclient._is_ipaddr("a.1.3.4"))
self.assertFalse(self.whoisclient._is_ipaddr("1.33.41"))
def test_is_domain(self):
self.assertTrue(self.whoisclient._is_domain("www.nu.nl"))
self.assertFalse(self.whoisclient._is_domain("1.2.3.4"))
self.assertTrue(self.whoisclient._is_domain("nu.n134"))
self.assertFalse(self.whoisclient._is_domain("nu.123"))
self.assertTrue(self.whoisclient._is_domain(".info"))
def test_is_ipaddr_v6(self):  # IPv6 cases; kept under a distinct name so it does not shadow the IPv4 test above
self.assertTrue(self.whoisclient._is_ipaddr("::1"))
self.assertTrue(self.whoisclient._is_ipaddr("a00::1111:2"))
self.assertFalse(self.whoisclient._is_ipaddr("aa::bb::1"))
self.assertTrue(self.whoisclient._is_ipaddr("a:a:a:a:b:1:2:3"))
def test_parse_whois_response(self):
test_response = """
% IANA WHOIS server
% for more information on IANA, visit http://www.iana.org
% This query returned 1 object
refer: whois.apnic.net
inetnum: 1.0.0.0 - 1.255.255.255
organisation: APNIC
status: ALLOCATED
whois: whois.apnic.net
changed: 2010-01
source: IANA
"""
wdata = self.whoisclient._parse_whois_response(test_response)
self.assertEqual(wdata, ([{"refer": "whois.apnic.net"},
{"inetnum":"1.0.0.0 - 1.255.255.255", "organisation": "APNIC", "status": "ALLOCATED"},
{"whois": "whois.apnic.net"},
{"changed": "2010-01", "source": "IANA"}]
))
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "3ev0/dns-monitor",
"path": "dnsmon/tests/whois_test.py",
"copies": "1",
"size": "2072",
"license": "apache-2.0",
"hash": -1482039249084187600,
"line_mean": 32.4193548387,
"line_max": 120,
"alpha_frac": 0.597007722,
"autogenerated": false,
"ratio": 3.139393939393939,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9222570848442891,
"avg_score": 0.002766162590209511,
"num_lines": 62
} |
__author__ = '3ev0'
"""
Whois library.
See RFC 3912
https://www.icann.org/resources/pages/approved-with-specs-2013-09-17-en#whois
We parse the key/value pairs as-is and do not try to transform them to a uniform format. This should be good enough.
Domain intermediaries (thin registries) are, as far as we know, limited to .com, .edu and .net domains.
For these domains we do a second referral to get the more detailed whois data.
"""
import re
import logging
import socket
import threading
import datetime
import time
from . import libnet
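# Illustrative usage of this module (a minimal sketch; the query strings are placeholders and
# the exact record contents depend on whichever whois server answers):
#   records = lookup("example.com")           # parsed into a list of key/value record dicts
#   raw_text = lookup("192.0.2.1", raw=True)  # unparsed server response as a single string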
_log = logging.getLogger(__name__)
_lock = threading.Lock()
_auth_wserver_cache = {}
_whois_root = "whois.iana.org"
_last_queried = {}
_last_queried_lock = threading.RLock()
_config = {"min_query_interval": 0.5}
def configure(**kwargs):
_config.update(kwargs)
_log.info("Module configured: %s", _config)
def repr_records(whoisdata):
lines = []
for record in whoisdata:
for k in sorted(record):
values = record[k].split("\n")
for val in values:
lines.append("{}: {}".format(k, val))
lines.append("")
return "\n".join(lines)
def domain_lookup(domain, wserver=None, raw=False):
if not libnet.is_domain(domain):
raise ValueError("%s is not a valid domain" % domain)
if len(domain.strip(".").split(".")) == 1:
tld = domain.split(".")[-1]
whoisdata = _talk_whois(_whois_root, "."+tld)
else:
if not wserver:
wserver = _get_auth_wserver_domain(domain)
whoisdata = _talk_whois(wserver, domain)
if raw:
return whoisdata
else:
return _parse_whois_response(whoisdata)
def lookup(querystr, wserver=None, raw=False):
if libnet.is_domain(querystr):
return domain_lookup(querystr, wserver, raw)
elif libnet.is_ipaddr(querystr):
return ip_lookup(querystr, wserver, raw)
elif libnet.is_asnum(querystr):
return ip_lookup(querystr, wserver, raw)
else:
raise ValueError(querystr, "Should be domain, ip or asnum")
def ip_lookup(querystr, wserver=None, raw=False):
if not libnet.is_ipaddr(querystr) and not libnet.is_asnum(querystr):
raise ValueError("%s is not a valid IP-address or ASnum" % querystr)
if not wserver:
wserver = _get_auth_wserver(querystr)
if wserver == "whois.arin.net": # ARIN needs a leading "+ " to return the full record set
querystr = "+ " + querystr
elif wserver == "whois.ripe.net": # no special query needed
pass
elif wserver == "whois.apnic.net": # no special query needed
pass
elif wserver == "whois.afrinic.net": # no special query needed
pass
elif wserver == "whois.lacnic.net": # no special query needed
pass
if raw:
return _talk_whois(wserver, querystr)
else:
return _parse_whois_response(_talk_whois(wserver, querystr))
def _parse_whois_response(response):
"""
Dealing with the many different interpretations of the whois response format.
If an empty line is encountered, start a new record.
If a line with a colon is encountered, treat everything before the first ':' as the key and start a value.
If a line without a colon is encountered when a value is started, add it to the current value.
If a line without a colon is encountered before a value is started, skip it.
:param response: the raw response to parse
:return: a list of records containing (key, value) tuples
"""
newkvre = re.compile(r"^(\s*)([^\>\%\s][^:]+):(\s*(.*))?$")
commre = re.compile(r"^\s*[\%\>\@\;].*$")
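# A small illustration of the docstring rules, using the sample from whois_test.py: a response such as
#   refer:        whois.apnic.net
#   <blank line>
#   inetnum:      1.0.0.0 - 1.255.255.255
#   organisation: APNIC
# parses into [{"refer": "whois.apnic.net"}, {"inetnum": "1.0.0.0 - 1.255.255.255", "organisation": "APNIC"}].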
records = []
currecord, curkey = {}, None
comment = False
for line in response.splitlines():
if line.strip() == "":
comment = False
if len(currecord):
records.append(currecord)
currecord, curkey = {}, None
continue
if comment:
continue
match = newkvre.match(line)
matchcomm = commre.match(line)
if match and matchcomm is None:
curkey = match.group(2)
val = match.group(4) if match.group(4) else ""
if curkey in currecord:
currecord[curkey] += "\n" + val
else:
currecord[curkey] = val
elif matchcomm: # part of comments
comment = True
continue
elif match is None and curkey: # this is likely part of multiline value
currecord[curkey] += "\n" + line.strip()
else:
comment = True
continue # this is likely start of comments
if len(currecord):
records.append(currecord)
_log.debug("Response parsed successfully. %d records", len(records))
return records
def _talk_whois(wserver, querystr):
_delay(wserver)
sock = socket.create_connection((wserver, 43))
_log.debug("Connected to %s", wserver)
queryblob = bytes(querystr + "\r\n", encoding="utf8", errors="replace")
msglen = len(queryblob)  # count the bytes actually being sent, including the trailing CRLF
totalsent = 0
while totalsent < msglen:
sent = sock.send(queryblob[totalsent:])
totalsent += sent
_log.debug("Request sent: %s", querystr)
chunks = []
chunk = sock.recv(4096)
chunks.append(chunk)
while len(chunk) > 0:
chunk = sock.recv(4096)
chunks.append(chunk)
response = str(b"".join(chunks), encoding="utf8", errors="replace")
_log.debug("Response received:\n%s", response)
return response
def _get_cached_wserver(key):
with _lock:
wserver = _auth_wserver_cache.get(key, None)
if wserver:
_log.debug("Cache hit on %s: %s", key, wserver)
else:
_log.debug("Cache miss on %s", key)
return wserver
def _cache_wserver(domain, wserver):
with _lock:
_auth_wserver_cache[domain] = wserver
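# Illustrative referral flow for _get_auth_wserver_domain below (the server names are examples of
# what the registries may return, not guaranteed values):
#   "example.com" -> ask whois.iana.org about ".com" -> referred to whois.verisign-grs.com
#   -> thin-registry case: query "=example.com" there -> the "Whois Server" field of the
#   matching record names the registrar's whois server, which is what gets returned and cached.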
def _get_auth_wserver_domain(domain):
"""
Return the authoritative whois server for the domain. It queries the global IANA whois server and finds the referral
whois server for the TLD of this domain.
:param domain: The domain for which the whois server should be found
:return:the domain name of the whois server for this domain
"""
tld = domain.split(".")[-1]
_log.debug("looking up authoritative wserver for %s (tld: %s)", domain, tld)
auth_wserver = _get_cached_wserver(tld)
if not auth_wserver:
respdata = _parse_whois_response(_talk_whois(_whois_root, "."+tld))
for record in respdata:
if "whois" in record:
auth_wserver = record["whois"]
_cache_wserver(tld, auth_wserver)
break
if not auth_wserver:
_log.error("Could not determine auth whois server for %s", domain)
raise Exception("Could not determine auth whois server for {}".format(domain))
# Special case. There is a second-tier authoritative server for .com, .edu and .net
if auth_wserver == "whois.verisign-grs.com":
_log.debug("Looking up intermediary authoritative wserver for %s", domain)
respdata = _parse_whois_response(_talk_whois(auth_wserver, "=" + domain))
for record in respdata:
if "Domain Name" in record:
auth_wserver = record["Whois Server"]
break
_log.debug("Found authoritative whois server: %s", auth_wserver)
return auth_wserver
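# Illustrative flow for _get_auth_wserver below (the referred server depends on which RIR holds the range):
#   "1.2.3.4" -> ask whois.iana.org -> the first record's "refer" field (e.g. whois.apnic.net)
#   is cached per query string and returned.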
def _get_auth_wserver(querystr):
"""
Return the authoritative whois server for this request. It queries the global IANA whois server and finds the referral
whois server for the query.
:param querystr: The IP or ASnum for which the whois server should be found
:return:the address of the whois server for this query string
"""
_log.debug("looking up authoritative wserver for %s", querystr)
auth_wserver = _get_cached_wserver(querystr)
if auth_wserver:
return auth_wserver
respdata = _parse_whois_response(_talk_whois(_whois_root, querystr))
try:
auth_wserver = respdata[0]["refer"]
except (KeyError, IndexError) as e:
auth_wserver = None
if not auth_wserver:
_log.error("Could not determine auth whois server for %s", querystr)
raise Exception("Could not determine auth whois server for {}".format(querystr))
_cache_wserver(querystr, auth_wserver)
_log.debug("Found authoritative whois server: %s", auth_wserver)
return auth_wserver
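# Rough timing sketch for _delay below, assuming the default min_query_interval of 0.5 seconds:
#   t=0.0  thread A queries server X -> proceeds immediately
#   t=0.1  thread B queries server X -> sleeps roughly 0.4s before proceeding
#   t=0.1  thread C queries server Y -> proceeds immediately (delays are tracked per server)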
def _delay(wserver):
"""
This forces threads to delay a preconfigured interval before querying the specified whois server again.
The thread that holds the wserver lock does not release until at least interval seconds have passed since last release.
:param wserver: The wserver for which the thread should delay
:return:
"""
with _last_queried_lock:
if wserver not in _last_queried:
_last_queried[wserver] = [threading.RLock(), 0]
with _last_queried[wserver][0]:
interval = datetime.datetime.now().timestamp() - _last_queried[wserver][1]
sleep_time = _config["min_query_interval"] - interval
if sleep_time > 0:
_log.debug("%s Delaying to query %s: %f seconds...", threading.current_thread().name, wserver, sleep_time)
time.sleep(sleep_time)
_last_queried[wserver][1] = datetime.datetime.now().timestamp()
| {
"repo_name": "3ev0/dns-monitor",
"path": "dnsmon/whois.py",
"copies": "1",
"size": "9321",
"license": "apache-2.0",
"hash": 130123753985403380,
"line_mean": 34.5763358779,
"line_max": 123,
"alpha_frac": 0.634159425,
"autogenerated": false,
"ratio": 3.61139093374661,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47455503587466097,
"avg_score": null,
"num_lines": null
} |
__author__ = '4Thing'
import urllib2
import random
import threading
import thread
import Queue
user_agent_list = \
[
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
"(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
"(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
"(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
"(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
"(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
"(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
"(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
"(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
"(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
ip_list = []
ip_queue = Queue.Queue()
def run():
user_agent = random.choice(user_agent_list)
proxy = random.choice(ip_list)
request = urllib2.Request('http://www.baidu.com/')
request.add_header('User-Agent', user_agent)
opener = urllib2.build_opener(urllib2.ProxyHandler({'http':'http://%s/'%proxy}), urllib2.HTTPHandler(debuglevel=1))
urllib2.install_opener(opener)
response = urllib2.urlopen(request, timeout=30)
print response.geturl()
lock = thread.allocate_lock()
def run2():
lock.acquire()
user_agent = random.choice(user_agent_list)
if ip_queue.empty():  # nothing left to test: release the lock and stop this worker
lock.release()
return
ip = ip_queue.get()
lock.release()
request = urllib2.Request('http://www.baidu.com/')
request.add_header('User-Agent', user_agent)
opener = urllib2.build_opener(urllib2.ProxyHandler({'http':'http://%s/'%ip}))
urllib2.install_opener(opener)
try:
response = opener.open(request, timeout=30)
print ip+":"+response.geturl()
except:
print 'something wrong!'
file = open(r"D:\Doc\ips.txt")  # raw string so the backslashes in the Windows path are taken literally
threads = []
def run3():
while(True):
run2()
for i in range(20):
threads.append(threading.Thread(target=run3))
for line in file:
ip_queue.put(line)
if __name__ == '__main__':
for t in threads:
t.setDaemon(False)
t.start()
while(ip_queue.empty() == False):
pass
print "over!" | {
"repo_name": "4xin/scrapy-study",
"path": "urllibTest/urllibDemo.py",
"copies": "1",
"size": "3915",
"license": "apache-2.0",
"hash": -6904811541591299000,
"line_mean": 36.4019607843,
"line_max": 119,
"alpha_frac": 0.601532567,
"autogenerated": false,
"ratio": 2.847272727272727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8916220597206386,
"avg_score": 0.006516939413268155,
"num_lines": 102
} |
__author__ = '4Thing'
import urllib2
import threading
import thread
import Queue
user_agent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
ip_queue = Queue.Queue()
threads = []
lock = thread.allocate_lock()
file = open(r"D:\Doc\ips.txt")  # raw string so the backslashes in the Windows path are taken literally
for line in file:
ip_queue.put(line.rstrip())
def authIP():
while(ip_queue.empty() == False):
lock.acquire()
ip = ip_queue.get()
lock.release()
request = urllib2.Request('http://www.cncn.gov.cn/art/2015/3/3/art_432_187630.html')
request.add_header('User-Agent', user_agent)
opener = urllib2.build_opener(urllib2.ProxyHandler({'http':'http://%s/'%ip}))
try:
response = opener.open(request, timeout=30)
# if(response.geturl().find("baidu")):
# print ip+":"+" is valid!"
print response.geturl()
print response.read()
except:
pass
for i in range(20):
threads.append(threading.Thread(target=authIP))
if __name__ == '__main__':
for t in threads:
t.setDaemon(False)
t.start()
while(ip_queue.empty() == False):
pass | {
"repo_name": "4xin/scrapy-study",
"path": "ProxyAuth/ProxyAuth.py",
"copies": "1",
"size": "1183",
"license": "apache-2.0",
"hash": 9043869931886548000,
"line_mean": 26.9024390244,
"line_max": 92,
"alpha_frac": 0.5655114117,
"autogenerated": false,
"ratio": 3.3512747875354107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44167861992354107,
"avg_score": null,
"num_lines": null
} |
__author__ = '7634'
import socket
import sys
from math import sin,cos
from time import clock,sleep
# Create a socket (SOCK_DGRAM means a UDP socket)
class ModelKtn:
""" Ktn model
"""
def __init__(self):
self.n = 10.0
self.v = 2.77
self.w = 1.0
self.omega = 1.0
self.t = 0.0
def data(self):
self.t += 1/self.n
self.x = 0.0
self.y = self.v*self.t
self.z = sin(self.w*self.t) + 1.0
self.V_x = 0.0
self.V_y = self.v
self.V_z = self.w * cos(self.w * self.t)
self.fi_z = self.omega * self.t
self.fi_x = 0.0
self.fi_y = 0.0
self.w_x = 0.0
self.w_y = 0.0
self.w_z = 0.0
return 'ktn;'+str(clock())+';' + \
str(self.t) + ';' + \
str(self.x) + ';' + \
str(self.y) + ';' + \
str(self.z) + ';' + \
str(self.V_x) + ';' + \
str(self.V_y) + ';' + \
str(self.V_z) + ';' + \
str(self.fi_z) + ';' + \
str(self.fi_x) + ';' + \
str(self.fi_y) + ';' + \
str(self.w_x) + ';' + \
str(self.w_y) + ';' + \
str(self.w_z)
class ControllerKtn:
def __init__(self):
self.HOST = "0.0.0.0"
self.PORT = 9991
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print ("KTN connected with CVS")
self.ktn = ModelKtn()
def send(self):
data = self.ktn.data()
print ("Sent: {}".format(data))
self.sock.sendto(data + "\n",(self.HOST,self.PORT))
sleep(0.1)
def run(self):
for i in range(0,1000):
self.send()
ctrl = ControllerKtn()
ctrl.run() | {
"repo_name": "juhnowski/DataStreams",
"path": "udp_ktn.py",
"copies": "1",
"size": "1884",
"license": "mit",
"hash": 339785135135966400,
"line_mean": 25.7352941176,
"line_max": 68,
"alpha_frac": 0.4102972399,
"autogenerated": false,
"ratio": 3.108910891089109,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40192081309891087,
"avg_score": null,
"num_lines": null
} |
__author__ = '7634'
import socket
import sys
from math import sin,cos,sqrt,pi
from time import clock,sleep
# Create a socket (SOCK_DGRAM means a UDP socket)
class ModelTas:
""" Tas model
"""
def __init__(self):
self.n = 50.0
self.t = 0.0
self.x_max = 1280.0
self.y_max = 1024.0
# model type
self.m = 0
# parameters for elliptic model
self.a = 600.0
self.b = 400.0
self.x_e = 640.0
self.y_e = 512.0
self.x_c = -self.a
self.y_c = 0.0
# parameters of the simulation model capture target
self.x_1 = 10.0
self.y_1 = 300.0
self.y_2 = 1000.0
# imitation's vars
self.x = 0.0
self.y = 0.0
self.fi_1 = 0.0
self.fi_2 = 0.0
def data(self):
self.t += 1/self.n
if (self.m == 0):
self.m0()
else:
self.m1()
return 'tas;'+str(clock())+';' + \
str(self.t) + ';' + \
str(self.x) + ';' + \
str(self.y) + ';' + \
str(self.fi_1) + ';' + \
str(self.fi_2)
# Model elliptical motion targets
def m0(self):
if (self.x_c < self.a):
self.y_c = (self.b/self.a)*sqrt(self.a*self.a - self.x_c*self.x_c)
else:
self.y_c = -(self.b/self.a)*sqrt(self.a*self.a - self.x_c*self.x_c)
self.x = self.x_e + self.x_c
self.y = self.y_e + self.y_c
self.t += 1/self.n
self.x_c += 1.0
return
# Capture target's simulation model
def m1(self):
if(self.t < self.y_2/self.n):
self.x = self.x_1
self.y = self.n*self.t
self.fi_1 = pi*self.t/(4*self.n)
self.fi_2 = 0
return
if (self.t < (self.y_2 - self.y_1)/self.n ):
self.x = self.x_1
self.y = self.y_2 - self.n*self.t
self.fi_1 = pi*self.t/4 - pi*self.t/(8*self.n)
self.fi_2 = 0
return
if (self.t <(self.x_max - self.x_1)/self.n):
self.x = self.x_1 + self.n*self.t
self.y = self.y_1
self.fi_1 = pi*self.t/8
self.fi_2 = pi*self.t/(360*self.n)
return
if (self.t == (self.x_max - self.x_1)/self.n):
self.t = 0
return
class ControllerTas:
def __init__(self):
self.HOST = "0.0.0.0"
self.PORT = 9991
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print ("Tas connected with CVS")
self.tas = ModelTas()
def send(self):
data = self.tas.data()
print "Sent: {}".format(data)
self.sock.sendto(data + "\n",(self.HOST,self.PORT))
sleep(0.02)
def run(self):
for i in range(0,1000):
self.send()
ctrl = ControllerTas()
ctrl.run() | {
"repo_name": "juhnowski/DataStreams",
"path": "udp_tas.py",
"copies": "1",
"size": "2985",
"license": "mit",
"hash": 5145508383742742000,
"line_mean": 26.4476190476,
"line_max": 79,
"alpha_frac": 0.4525963149,
"autogenerated": false,
"ratio": 3,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39525963149,
"avg_score": null,
"num_lines": null
} |
__author__ = '7634'
import socket
import sys
from math import sin,cos,sqrt
from time import clock,sleep
# Create a socket (SOCK_DGRAM means a UDP socket)
class ModelLd:
""" Ld model
"""
def __init__(self):
self.n = 100.0
self.y_min = -3000.0
self.y_max = 3000.0
self.r = 0.0
self.y = 0.0
self.v = 400.0
self.z = 1000.0
self.t = 0.0
def data(self):
self.t += 1/self.n
self.y = self.y_max - self.v*self.t
if (self.y <= self.y_min):
self.y = self.y_max
self.r = sqrt(self.z*self.z + self.y*self.y)
return 'ld;'+str(clock())+';' + \
str(self.t) + ';' + \
str(self.r)
class ControllerLd:
def __init__(self):
self.HOST = "0.0.0.0"
self.PORT = 9991
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print ("Ld connected with CVS")
self.ld = ModelLd()
def send(self):
data = self.ld.data()
print "Sent: {}".format(data)
self.sock.sendto(data + "\n",(self.HOST,self.PORT))
sleep(0.01)
def run(self):
for i in range(0,1000):
self.send()
ctrl = ControllerLd()
ctrl.run() | {
"repo_name": "juhnowski/DataStreams",
"path": "udp_ld.py",
"copies": "1",
"size": "1319",
"license": "mit",
"hash": 1718320720330337500,
"line_mean": 23.4038461538,
"line_max": 68,
"alpha_frac": 0.4821834723,
"autogenerated": false,
"ratio": 3.2170731707317075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41992566430317074,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aagrawal'
__version__ = '0.9'
# Interface for accessing the VQA dataset.
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py).
# The following functions are defined:
# VQA - VQA class that loads VQA annotation file and prepares data structures.
# getQuesIds - Get question ids that satisfy given filter conditions.
# getImgIds - Get image ids that satisfy given filter conditions.
# loadQA - Load questions and answers with the specified question ids.
# showQA - Display the specified questions and answers.
# loadRes - Load result file and create result object.
# Help on each function can be accessed by: "help(COCO.function)"
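# Example usage (an illustrative sketch only; the file names below are placeholders):
#   vqa = VQA('annotations.json', 'questions.json')
#   quesIds = vqa.getQuesIds(ansTypes='yes/no')
#   anns = vqa.loadQA(quesIds)
#   vqa.showQA(anns[:1])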
import json
import datetime
import copy
class VQA:
def __init__(self, annotation_file=None, question_file=None):
"""
Constructor of VQA helper class for reading and visualizing questions and answers.
:param annotation_file (str): location of VQA annotation file
:return:
"""
# load dataset
self.dataset = {}
self.questions = {}
self.qa = {}
self.qqa = {}
self.imgToQA = {}
if not annotation_file == None and not question_file == None:
print 'loading VQA annotations and questions into memory...'
time_t = datetime.datetime.utcnow()
dataset = json.load(open(annotation_file, 'r'))
questions = json.load(open(question_file, 'r'))
print datetime.datetime.utcnow() - time_t
self.dataset = dataset
self.questions = questions
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
imgToQA = {ann['image_id']: [] for ann in self.dataset['annotations']}
qa = {ann['question_id']: [] for ann in self.dataset['annotations']}
qqa = {ann['question_id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToQA[ann['image_id']] += [ann]
qa[ann['question_id']] = ann
for ques in self.questions['questions']:
qqa[ques['question_id']] = ques
print 'index created!'
# create class members
self.qa = qa
self.qqa = qqa
self.imgToQA = imgToQA
def info(self):
"""
Print information about the VQA annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
"""
Get question ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get question ids for given imgs
quesTypes (str array) : get question ids for given question types
ansTypes (str array) : get question ids for given answer types
:return: ids (int array) : integer array of question ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(imgIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
anns = sum([self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA],[])
else:
anns = self.dataset['annotations']
anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes]
anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes]
ids = [ann['question_id'] for ann in anns]
return ids
def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):
"""
Get image ids that satisfy given filter conditions. default skips that filter
:param quesIds (int array) : get image ids for given question ids
quesTypes (str array) : get image ids for given question types
ansTypes (str array) : get image ids for given answer types
:return: ids (int array) : integer array of image ids
"""
quesIds = quesIds if type(quesIds) == list else [quesIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(quesIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset['annotations']
else:
if not len(quesIds) == 0:
anns = sum([self.qa[quesId] for quesId in quesIds if quesId in self.qa],[])
else:
anns = self.dataset['annotations']
anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes]
anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes]
ids = [ann['image_id'] for ann in anns]
return ids
def loadQA(self, ids=[]):
"""
Load questions and answers with the specified question ids.
:param ids (int array) : integer ids specifying question ids
:return: qa (object array) : loaded qa objects
"""
if type(ids) == list:
return [self.qa[id] for id in ids]
elif type(ids) == int:
return [self.qa[ids]]
def showQA(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
for ann in anns:
quesId = ann['question_id']
print "Question: %s" %(self.qqa[quesId]['question'])
for ans in ann['answers']:
print "Answer %d: %s" %(ans['answer_id'], ans['answer'])
def loadRes(self, resFile, quesFile):
"""
Load result file and return a result object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = VQA()
res.questions = json.load(open(quesFile))
res.dataset['info'] = copy.deepcopy(self.questions['info'])
res.dataset['task_type'] = copy.deepcopy(self.questions['task_type'])
res.dataset['data_type'] = copy.deepcopy(self.questions['data_type'])
res.dataset['data_subtype'] = copy.deepcopy(self.questions['data_subtype'])
res.dataset['license'] = copy.deepcopy(self.questions['license'])
print 'Loading and preparing results... '
time_t = datetime.datetime.utcnow()
anns = json.load(open(resFile))
assert type(anns) == list, 'results is not an array of objects'
annsQuesIds = [ann['question_id'] for ann in anns]
assert set(annsQuesIds) == set(self.getQuesIds()), \
'Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in the annotation file or there is at least one question id that does not belong to the question ids in the annotation file.'
for ann in anns:
quesId = ann['question_id']
if res.dataset['task_type'] == 'Multiple Choice':
assert ann['answer'] in self.qqa[quesId]['multiple_choices'], 'predicted answer is not one of the multiple choices'
qaAnn = self.qa[quesId]
ann['image_id'] = qaAnn['image_id']
ann['question_type'] = qaAnn['question_type']
ann['answer_type'] = qaAnn['answer_type']
print 'DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds())
res.dataset['annotations'] = anns
res.createIndex()
return res
| {
"repo_name": "VT-vision-lab/VQA",
"path": "PythonHelperTools/vqaTools/vqa.py",
"copies": "1",
"size": "7157",
"license": "bsd-2-clause",
"hash": -2754397214706195000,
"line_mean": 39.2078651685,
"line_max": 233,
"alpha_frac": 0.6652228587,
"autogenerated": false,
"ratio": 3.219523166891588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9146315392195942,
"avg_score": 0.04768612667912929,
"num_lines": 178
} |
__author__ = 'aagrawal'
__version__ = '0.9'
"""
Modified by Mateusz Malinowski [mmalinow@mpi-inf.mpg.de] to work
with test datasets without annotations.
"""
# Interface for accessing the VQA dataset.
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py).
# The following functions are defined:
# VQA - VQA class that loads VQA annotation file and prepares data structures.
# getQuesIds - Get question ids that satisfy given filter conditions.
# getImgIds - Get image ids that satisfy given filter conditions.
# loadQA - Load questions and answers with the specified question ids.
# showQA - Display the specified questions and answers.
# loadRes - Load result file and create result object.
# Help on each function can be accessed by: "help(COCO.function)"
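# Example usage for a test split without annotations (an illustrative sketch; the path is a placeholder):
#   vqa = VQA(annotation_file=None, question_file='test_questions.json')
#   question_ids = [q['question_id'] for q in vqa.questions['questions']]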
import json
import datetime
import copy
class VQA:
def __init__(self, annotation_file=None, question_file=None):
"""
Constructor of VQA helper class for reading and visualizing questions and answers.
:param annotation_file (str): location of VQA annotation file
:return:
"""
def init_empty_dataset():
d = {}
d['info'] = 'empty'
d['annotations'] = []
return d
# load dataset
self.dataset = {}
self.questions = {}
self.qa = {}
self.qqa = {}
self.imgToQA = {}
if not question_file == None:
print 'loading VQA annotations and questions into memory...'
time_t = datetime.datetime.utcnow()
if annotation_file is not None:
dataset = json.load(open(annotation_file, 'r'))
else:
dataset = init_empty_dataset()
questions = json.load(open(question_file, 'r'))
print datetime.datetime.utcnow() - time_t
self.dataset = dataset
self.questions = questions
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
imgToQA = {ann['image_id']: [] for ann in self.dataset['annotations']}
qa = {ann['question_id']: [] for ann in self.dataset['annotations']}
qqa = {ann['question_id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToQA[ann['image_id']] += [ann]
qa[ann['question_id']] = ann
for ques in self.questions['questions']:
qqa[ques['question_id']] = ques
print 'index created!'
# create class members
self.qa = qa
self.qqa = qqa
self.imgToQA = imgToQA
def info(self):
"""
Print information about the VQA annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
"""
Get question ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get question ids for given imgs
quesTypes (str array) : get question ids for given question types
ansTypes (str array) : get question ids for given answer types
:return: ids (int array) : integer array of question ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(imgIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
anns = sum([self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA],[])
else:
anns = self.dataset['annotations']
anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes]
anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes]
ids = [ann['question_id'] for ann in anns]
return ids
def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):
"""
Get image ids that satisfy given filter conditions. default skips that filter
:param quesIds (int array) : get image ids for given question ids
quesTypes (str array) : get image ids for given question types
ansTypes (str array) : get image ids for given answer types
:return: ids (int array) : integer array of image ids
"""
quesIds = quesIds if type(quesIds) == list else [quesIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(quesIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset['annotations']
else:
if not len(quesIds) == 0:
anns = sum([self.qa[quesId] for quesId in quesIds if quesId in self.qa],[])
else:
anns = self.dataset['annotations']
anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes]
anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes]
ids = [ann['image_id'] for ann in anns]
return ids
def loadQA(self, ids=[]):
"""
Load questions and answers with the specified question ids.
:param ids (int array) : integer ids specifying question ids
:return: qa (object array) : loaded qa objects
"""
if type(ids) == list:
return [self.qa[id] for id in ids]
elif type(ids) == int:
return [self.qa[ids]]
def showQA(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
for ann in anns:
quesId = ann['question_id']
print "Question: %s" %(self.qqa[quesId]['question'])
for ans in ann['answers']:
print "Answer %d: %s" %(ans['answer_id'], ans['answer'])
def loadRes(self, resFile, quesFile):
"""
Load result file and return a result object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = VQA()
res.questions = json.load(open(quesFile))
res.dataset['info'] = copy.deepcopy(self.questions['info'])
res.dataset['task_type'] = copy.deepcopy(self.questions['task_type'])
res.dataset['data_type'] = copy.deepcopy(self.questions['data_type'])
res.dataset['data_subtype'] = copy.deepcopy(self.questions['data_subtype'])
res.dataset['license'] = copy.deepcopy(self.questions['license'])
print 'Loading and preparing results... '
time_t = datetime.datetime.utcnow()
anns = json.load(open(resFile))
assert type(anns) == list, 'results is not an array of objects'
annsQuesIds = [ann['question_id'] for ann in anns]
assert set(annsQuesIds) == set(self.getQuesIds()), \
'Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in the annotation file or there is at least one question id that does not belong to the question ids in the annotation file.'
for ann in anns:
quesId = ann['question_id']
if res.dataset['task_type'] == 'Multiple Choice':
assert ann['answer'] in self.qqa[quesId]['multiple_choices'], 'predicted answer is not one of the multiple choices'
qaAnn = self.qa[quesId]
ann['image_id'] = qaAnn['image_id']
ann['question_type'] = qaAnn['question_type']
ann['answer_type'] = qaAnn['answer_type']
print 'DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds())
res.dataset['annotations'] = anns
res.createIndex()
return res
| {
"repo_name": "mateuszmalinowski/visual_turing_test-tutorial",
"path": "kraino/utils/vqaTools/vqa.py",
"copies": "1",
"size": "8434",
"license": "mit",
"hash": 1412840564757121800,
"line_mean": 42.6994818653,
"line_max": 233,
"alpha_frac": 0.5833530946,
"autogenerated": false,
"ratio": 3.9010175763182238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49843706709182234,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aahammer'
import datetime
import time
from time import mktime
def getMatchData(tree):
match_col = {}
if tree.xpath('//span[@id="lblHeader"][1]')[0].text == None:
return
match_col['queue_type'], match_col['match_type'] = tree.xpath('//span[@id="lblHeader"][1]')[0].text.split(':')
ids = ['lblMatchId','lblMatchTime','lblMatchDuration', None]
ids_db = ['match_id', 'end_time', 'duration']
match_statistics = tree.xpath('//div[@id="detailsTopBar"][1]')
for mstats in match_statistics:
id_index = 0
for element in mstats.iter():
if element.attrib.has_key('id'):
if element.attrib['id'] == ids[id_index]:
match_col[ids_db[id_index]] = element.text
id_index += 1
match_col['match_id'] = int(match_col['match_id'])
match_col['duration'] = int(match_col['duration'])
match_col['end_time'] = datetime.datetime.fromtimestamp(mktime(time.strptime(match_col['end_time'], "%m/%d/%Y %H:%M:%S GMT")))
return match_col
def getPlayerData(tree):
teamData = []
ids = ['lblGodName','pdName','pdKills','pdDeaths','pdAssists','pdGold', 'pdDamagePlayer', 'pdDamageStructure', 'Label1', 'pdDamageBot', 'pdDamageTaken', None]
ids_db = ['god', 'name', 'kills', 'deaths', 'assists', 'gold','damage_player', 'damage_structure', 'healing', 'damage_creep', 'damage_taken']
data_ids_db = ['item1', 'item2', 'item3', 'item4', 'item5', 'item6', 'active1', 'active2']
# Entry Path for Playerdetails
player_statistics = tree.xpath('//div[@id="detailsContent"]/div[2]//div[@id="panDetailsTable"]/table')
for pstats in player_statistics:
id_index = 0
data_id_index = 0
player = {}
for element in pstats.iter():
if element.attrib.has_key('id'):
if element.attrib['id'] == ids[id_index]:
if element.attrib['id'] == 'lblGodName' or element.attrib['id'] == 'pdName': player[ids_db[id_index]] = element.text
else: player[ids_db[id_index]] = int(element.text)
#print element.attrib['id']+ ' : ' + element.text
id_index += 1
elif element.attrib.has_key('class') and element.attrib['class'] == 'playerDetailsTable':
#print 'level' + ' : ' + element.xpath('tr/td')[0].text.strip()
player['level'] = int(element.xpath('tr/td')[0].text.strip())
elif element.attrib.has_key('data-id'):
player[data_ids_db[data_id_index]] = int(element.attrib['data-id'])
data_id_index += 1
teamData.append(player)
return teamData | {
"repo_name": "aahammer/smitestats",
"path": "src/load/parseSmiteMatch.py",
"copies": "1",
"size": "2700",
"license": "mit",
"hash": -902340133176381400,
"line_mean": 39.9242424242,
"line_max": 162,
"alpha_frac": 0.5748148148,
"autogenerated": false,
"ratio": 3.3834586466165413,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9426424035246252,
"avg_score": 0.006369885234057575,
"num_lines": 66
} |
__author__ = 'aakilomar'
import requests, json, time
requests.packages.urllib3.disable_warnings()
host = "https://localhost:8443"
#from rest_requests import add_user
def add_user(phone):
post_url = host + "/api/user/add/" + str(phone)
return requests.post(post_url,None, verify=False).json()
def add_group(userid,phonenumbers):
post_url = host + "/api/group/add/" + str(userid) + "/" + phonenumbers
return requests.post(post_url,None, verify=False).json()
#/add/{userId}/{groupId}/{issue}
def add_vote(userid,groupid,issue):
post_url = host + "/api/vote/add/" + str(userid) + "/" + str(groupid) + "/" + issue
return requests.post(post_url,None, verify=False).json()
def vote_list():
list_url = host + "/api/vote/listallfuture"
r = requests.get(list_url)
print r.json
print r.text
def set_event_time(eventid,time):
post_url = host + "/api/event/settime/" + str(eventid) + "/" + time
return requests.post(post_url,None, verify=False).json()
def rsvp(eventid,userid,message):
post_url = host + "/api/event/rsvp/" + str(eventid) + "/" + str(userid) + "/" + str(message)
return requests.post(post_url,None, verify=False).json()
def add_user_to_group(userid,groupid):
post_url = host + "/api/group/add/usertogroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).json()
def manualreminder(eventid,message):
post_url = host + "/api/event/manualreminder/" + str(eventid) + "/" + str(message)
return requests.post(post_url,None, verify=False).json()
user = add_user("0826607134")
group = add_group(user['id'],"0821111111")
user2 = add_user("0821111112")
group = add_user_to_group(user2['id'],group['id'])
print user
print group
issue = add_vote(user['id'], group['id'],"test vote")
print issue
#future_votes = vote_list()
#print future_votes
issue = set_event_time(issue['id'],"30th 7pm")
r = rsvp(issue['id'],user['id'],"yes")
r2 = rsvp(issue['id'],user2['id'],"no")
r = rsvp(issue['id'],user['id'],"yes")
ok = manualreminder(issue['id'],"|") # should use reminder message
ok = manualreminder(issue['id'],"my manual messsage")
| {
"repo_name": "PaballoDitshego/grassroot-platform",
"path": "docs/tests/vote_requests.py",
"copies": "2",
"size": "2148",
"license": "bsd-3-clause",
"hash": 4294464664050359000,
"line_mean": 30.5882352941,
"line_max": 96,
"alpha_frac": 0.6601489758,
"autogenerated": false,
"ratio": 2.8870967741935485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4547245749993548,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aakilomar'
import requests, json, time
from timeit import default_timer as timer
requests.packages.urllib3.disable_warnings()
host = "https://localhost:8443"
def cancel_event(eventid):
post_url = host + "/api/event/cancel/" + str(eventid)
return requests.post(post_url,None, verify=False).json()
def add_user(phone):
post_url = host + "/api/user/add/" + str(phone)
return requests.post(post_url,None, verify=False).json()
def rsvp(eventid,userid,message):
post_url = host + "/api/event/rsvp/" + str(eventid) + "/" + str(userid) + "/" + str(message)
return requests.post(post_url,None, verify=False).json()
def rsvpRequired(userid):
post_url = host + "/api/event/rsvprequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def voteRequired(userid):
post_url = host + "/api/event/voterequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def upcomingVotes(groupid):
post_url = host + "/api/event/upcoming/vote/" + str(groupid)
return requests.get(post_url,None, verify=False).json()
def upcomingMeeting(groupid):
post_url = host + "/api/event/upcoming/meeting/" + str(groupid)
return requests.get(post_url,None, verify=False).json()
def votesPerGroupForEvent(groupid, eventid):
post_url = host + "/api/event/rsvp/totalspergroup/" + str(groupid) + "/" + str(eventid)
return requests.post(post_url,None, verify=False).json()
def addLogBook(userid, groupid, message):
post_url = host + "/api/logbook/add/" + str(userid) + "/" + str(groupid) + "/" + message
return requests.post(post_url,None, verify=False).json()
def addLogBookWithDate(userid, groupid, message, actionbydate):
post_url = host + "/api/logbook/addwithdate/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + actionbydate
return requests.post(post_url,None, verify=False).json()
def addLogBookWithDateAndAssign(userid, groupid, message, actionbydate, assigntouserid):
post_url = host + "/api/logbook/addwithdateandassign/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + actionbydate + "/" + str(assigntouserid)
return requests.post(post_url,None, verify=False).json()
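# NOTE: the 4-argument addLogBook defined below shadows the 3-argument addLogBook
# above -- Python keeps only the most recent definition bound to a given name.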
def addLogBook(userid, groupid, message, replicate):
post_url = host + "/api/logbook/add/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + str(replicate)
return requests.post(post_url,None, verify=False).json()
def listReplicated(groupid):
post_url = host + "/api/logbook/listreplicated/" + str(groupid)
return requests.get(post_url,None, verify=False).json()
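# NOTE: likewise, the 2-argument listReplicated below shadows the 1-argument version above.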
def listReplicated(groupid, completed):
post_url = host + "/api/logbook/listreplicated/" + str(groupid) + "/" + str(completed)
return requests.get(post_url,None, verify=False).json()
def setInitiatedSession(userid):
post_url = host + "/api/user/setinitiatedsession/" + str(userid)
return requests.post(post_url,None, verify=False).json()
def listReplicatedMessage(groupid, message):
post_url = host + "/api/logbook/listreplicatedbymessage/" + str(groupid) + "/" + message
return requests.get(post_url,None, verify=False).json()
def createAccount(userid,groupid,accountname):
post_url = host + "/api/account/add/" + str(userid) + "/" + str(groupid) + "/" + str(accountname)
return requests.post(post_url,None, verify=False).json()
def ussdStart(phonenumber,enteredUssd):
post_url = host + "/ussd/start?msisdn=" + str(phonenumber)
return requests.get(post_url,None, verify=False)
def add_user_to_group(userid,groupid):
post_url = host + "/api/group/add/usertogroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).json()
def remove_user_from_group(userid,groupid):
post_url = host + "/api/group/remove/userfromgroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).json()
def get_user_join_group(userid,groupid):
post_url = host + "/api/group/get/userjoingroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).content
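# NOTE: rsvpRequired and voteRequired below repeat the identical definitions from
# earlier in this file; the duplicates are harmless but redundant.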
def rsvpRequired(userid):
post_url = host + "/api/event/rsvprequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def voteRequired(userid):
post_url = host + "/api/event/voterequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def add_event(userid,groupid, name):
post_url = host + "/api/event/add/" + str(userid) + "/" + str(groupid) + "/" + name
return requests.post(post_url,None, verify=False).json()
#print cancel_event(5166)
#user = add_user("0823333332")
#user = add_user("0821111111")
#print "user-->" + str(user)
#print rsvp(5167,user['id'],"no")
#print rsvpRequired(user['id'])
#print voteRequired(user['id'])
#print upcomingVotes(231)
#print votesPerGroupForEvent(194,5103)
#print addLogBook(1,85,"X must do Y")
#print addLogBook(1,88,"Somebody must Y",True) # has sub groups
#print addLogBook(1,85,"Somebody must do X",True) # no subgroups
#print listReplicated(88,False)
#print addLogBookWithDateAndAssign(1,21,"aakil must do Y","2015-12-13 08:45:00",588)
#print addLogBookWithDate(1,21,"someone must do Y","2015-12-13 08:45:00")
#print setInitiatedSession(588)
#print(listReplicatedMessage(88,"Somebody must X"))
#print(createAccount(1,21,"acc 21"))
#for i in range(1,7,1):
## start = timer()
# print ussdStart("0826607134","")
# end = timer()
# print(end - start)
#print add_user_to_group(588,82)
#print remove_user_from_group(588,82)
#print get_user_join_group(588,82)
#print voteRequired(817)
print rsvpRequired(817)
print "klaarie"
| {
"repo_name": "mokoka/grassroot-platform",
"path": "docs/tests/adhoc_requests.py",
"copies": "2",
"size": "5668",
"license": "bsd-3-clause",
"hash": 5066557155340078000,
"line_mean": 39.7769784173,
"line_max": 159,
"alpha_frac": 0.6887791108,
"autogenerated": false,
"ratio": 2.9551616266944736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46439407374944736,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aakilomar'
import requests, json, time
requests.packages.urllib3.disable_warnings()
host = "https://localhost:8443"
# different project but left as an example
def rewrite_event(event):
post_url = host + "/api/event/create/"
headers = {"Authorization" : "(some auth code)", "Accept" : "application/json", "Content-Type" : "application/json"}
r = requests.post(url=post_url,data=json.dumps(event),headers=headers, verify=False)
    print r.json()
print r.text
def event_list():
list_url = host + "/api/event/list"
r = requests.get(list_url, verify=False)
    print r.json()
print r.text
def user_list():
list_url = host + "/api/user/list"
return requests.get(list_url, verify=False).json()
#/list/groupandsubgroups/{groupId}
def group_and_subgroup_list(groupid):
list_url = host + "/api/group/list/groupandsubgroups/" + str(groupid)
return requests.get(list_url, verify=False).json()
def add_user(phone):
post_url = host + "/api/user/add/" + str(phone)
return requests.post(post_url,None, verify=False).json()
def add_group(userid,phonenumbers):
post_url = host + "/api/group/add/" + str(userid) + "/" + phonenumbers
return requests.post(post_url,None, verify=False).json()
def add_sub_group(userid,groupid,subgroupname):
post_url = host + "/api/group/add/subgroup/" + str(userid) + "/" + str(groupid) + "/" + str(subgroupname)
return requests.post(post_url,None, verify=False).json()
def add_user_to_group(userid,groupid):
post_url = host + "/api/group/add/usertogroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).json()
def add_event(userid,groupid, name):
post_url = host + "/api/event/add/" + str(userid) + "/" + str(groupid) + "/" + name
return requests.post(post_url,None, verify=False).json()
def add_event_set_subgroups(userid,groupid, name, includesubgroups):
post_url = host + "/api/event/add/" + str(userid) + "/" + str(groupid) + "/" + name + "/" + includesubgroups
return requests.post(post_url,None, verify=False).json()
def set_event_location(eventid,location):
post_url = host + "/api/event/setlocation/" + str(eventid) + "/" + location
return requests.post(post_url,None, verify=False).json()
#def set_event_day(eventid,day):
# post_url = host + "/api/event/setday/" + str(eventid) + "/" + day
# return requests.post(post_url,None, verify=False).json()
def set_event_time(eventid,time):
post_url = host + "/api/event/settime/" + str(eventid) + "/" + time
return requests.post(post_url,None, verify=False).json()
def cancel_event(eventid):
post_url = host + "/api/event/cancel/" + str(eventid)
return requests.post(post_url,None, verify=False).json()
def rsvp(eventid,userid,message):
post_url = host + "/api/event/rsvp/" + str(eventid) + "/" + str(userid) + "/" + str(message)
return requests.post(post_url,None, verify=False).json()
def rsvpTotals(eventid):
post_url = host + "/api/event/rsvp/totals/" + str(eventid)
return requests.post(post_url,None, verify=False).json()
def rsvpRequired(userid):
post_url = host + "/api/event/rsvprequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
#add("nogge meeting")
#setlocation(1,"ellispark")
#event = {'name': 'bulls game','location': 'moftus'}
#create(event)
# create user
user = add_user("0826607134")
print user
print "user.id..." + str(user['id'])
#print user_list()
# create group
grouplevel1 = add_group(user['id'],"0821111111 0821111112")
print grouplevel1
print "group.id..." + str(grouplevel1['id'])
# create sub groups
grouplevel2 = add_sub_group(user['id'],grouplevel1['id'],"level 2")
grouplevel3 = add_sub_group(user['id'],grouplevel2['id'],"level 3")
# add users to subgroups
userlevel2_1 = add_user("0822222221")
userlevel2_2 = add_user("0822222222")
userlevel3_1 = add_user("0823333331")
userlevel3_2 = add_user("0823333332")
grouplevel2 = add_user_to_group(userlevel2_1['id'],grouplevel2['id'])
grouplevel2 = add_user_to_group(userlevel2_2['id'],grouplevel2['id'])
grouplevel3 = add_user_to_group(userlevel3_1['id'],grouplevel3['id'])
grouplevel3 = add_user_to_group(userlevel3_2['id'],grouplevel3['id'])
# create event
event = add_event_set_subgroups(user['id'],grouplevel1['id'],"sub groups and all","true")
print event
event = set_event_location(event['id'],"ellispark")
print event
event = set_event_time(event['id'],"30th 11pm")
# let's RSVP a few members
eventlog = rsvp(event['id'],user['id'],"yes")
print eventlog
eventlog = rsvp(event['id'],userlevel2_1['id'],"yes")
print eventlog
eventlog = rsvp(event['id'],userlevel2_2['id'],"no")
print eventlog
eventlog = rsvp(event['id'],userlevel3_1['id'],"fuckoff")
print eventlog
# wait for mq messages to be cleared
print "sleeping 3 seconds"
time.sleep(3)
# let's make some changes
event = set_event_location(event['id'],"loftus")
event = set_event_time(event['id'],"30th 10pm")
# wait for mq messages to be cleared
print "sleeping 3 seconds"
time.sleep(3)
# and now cancel
#event = cancel_event(event['id'])
#print event
totals = rsvpTotals(event['id'])
print totals
userlevel1_3 = add_user("0821111113")
add_user_to_group(userlevel1_3['id'],grouplevel1['id'])
userlevel2_3 = add_user("0822222223")
add_user_to_group(userlevel2_3['id'],grouplevel2['id'])
add_user_to_group(user['id'],grouplevel2['id'])
# let's add lots of events
#
#for idx in range(1,5000,1):
# event = add_event_set_subgroups(user['id'],grouplevel1['id'],"repeat group " + str(idx),"true")
print "rsvp required for 0826607134...\n"
rsvpreq = rsvpRequired(user['id'])
for rsvpevent in rsvpreq:
print rsvpevent
print "rsvp required for 0823333332...\n"
rsvpreq = rsvpRequired(userlevel3_2['id'])
for rsvpevent in rsvpreq:
print rsvpevent
# wait for mq messages to be cleared
print "sleeping 3 seconds"
time.sleep(3)
# get user 0821111111 - add_user does a load-or-save (upsert)
userlevel1_1 = add_user("0821111111")
# rsvp no for 0821111111
eventlog = rsvp(event['id'],userlevel1_1['id'],"no")
# make change and see if 0821111111 gets the change
event = set_event_time(event['id'],"30th 9pm")
#event = set_event_location(event['id'],"new location")
# wait for mq messages to be cleared
print "sleeping 3 seconds"
time.sleep(3)
#event = set_event_location(event['id'],"zoo lake")
print " get group and subgroups for " + str(grouplevel1['id'])
grouplist = group_and_subgroup_list(grouplevel1['id'])
for group in grouplist:
print group
print 'klaarie!!!'
| {
"repo_name": "mokoka/grassroot-platform",
"path": "docs/tests/rest_requests.py",
"copies": "2",
"size": "6527",
"license": "bsd-3-clause",
"hash": 3690699247240019500,
"line_mean": 32.3010204082,
"line_max": 120,
"alpha_frac": 0.6880649609,
"autogenerated": false,
"ratio": 2.880406001765225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45684709626652253,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aamir'
import argparse
from model import LatLongData
from model import PlaceData
def get_latlong(days):
latlong_data = LatLongData(int(days)).get_data()
display(latlong_data)
def get_places(days):
places_data = PlaceData(int(days)).get_data()
display(places_data)
def display(data):
    print "REGION" + "\t" + "EARTHQUAKE_COUNT" + "\t" + "TOTAL MAGNITUDE"
    # print at most the first 10 rows
    for row in data[:10]:
        print row
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--days', help='Number of days back from the current time to consider. Defaults to 30', default=30)
    parser.add_argument('--region', help="Either 'latlong' or 'name'. If 'latlong' is specified, regions are defined"
                                         " according to blocks of latitude/longitude; if 'name' is specified, "
                                         "regions are defined according to place name. Defaults to 'name'", default="name")
args = vars(parser.parse_args())
if args['region'] == 'name':
get_places(args['days'])
if args['region'] == 'latlong':
get_latlong(args['days'])
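# Illustrative invocations (values hypothetical):
#   python main.py                              # last 30 days, grouped by place name
#   python main.py --days 7 --region latlong    # last 7 days, grouped by lat/long blocks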
| {
"repo_name": "aamir/eathquake",
"path": "main.py",
"copies": "1",
"size": "1183",
"license": "mit",
"hash": -2337871117599279000,
"line_mean": 33.7941176471,
"line_max": 127,
"alpha_frac": 0.5984784446,
"autogenerated": false,
"ratio": 3.70846394984326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9627480555119281,
"avg_score": 0.03589236786479605,
"num_lines": 34
} |
__author__ = 'aamir'
import math
class Aggregator:
def __init__(self):
self.magnitude = []
self.max_mag = 0
self.count = 0
self.total_energy = 0
self.max_energy = 0
def add_magnitude(self, magnitude):
self.count += 1
self.magnitude.append(magnitude)
if self.max_mag < magnitude:
self.max_mag = magnitude
self.calculate_energy(magnitude)
def calculate_energy(self, magnitude):
if magnitude:
temp = (10**magnitude)
self.total_energy += temp
if self.max_energy < temp:
self.max_energy = temp
class Place(Aggregator):
def __init__(self, place):
self.place = place
Aggregator.__init__(self)
def __repr__(self):
return self.place + "\t" + str(self.count) + "\t" + str(math.log10(self.total_energy/self.count))
class LatLong(Aggregator):
def __init__(self, lat, lon):
self.lat = lat
self.lon = lon
Aggregator.__init__(self)
def __repr__(self):
return "( " + str(self.lat) + str(self.lon) + " )" + "\t" + str(math.log10(self.total_energy/self.count))
| {
"repo_name": "aamir/eathquake",
"path": "aggregator.py",
"copies": "1",
"size": "1179",
"license": "mit",
"hash": -7470073298244385000,
"line_mean": 27.0714285714,
"line_max": 113,
"alpha_frac": 0.5530110263,
"autogenerated": false,
"ratio": 3.6055045871559632,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4658515613455963,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aamir'
import os
import sqlite3
import requests
import datetime
api_endpoint = "http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_month.geojson"
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
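# Minimal sketch of what dict_factory does (assumes an in-memory database):
#   conn = sqlite3.connect(':memory:')
#   conn.row_factory = dict_factory
#   cur = conn.cursor()
#   cur.execute("SELECT 1 AS quake_id, 'Alaska' AS place")
#   print cur.fetchone()   # -> {'quake_id': 1, 'place': u'Alaska'}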
class Data:
def __init__(self, last_days):
self.data_file = 'earthquakes.db'
self.date_file = 'date.txt'
self.until = last_days
if not os.path.exists(self.data_file):
self.conn = sqlite3.connect(self.data_file)
self.conn.row_factory = dict_factory
self.c = self.conn.cursor()
# Create table
self.c.execute(
'''CREATE TABLE quakes (quake_id TEXT PRIMARY KEY, place TEXT, time datetime, lat real, lon real, mag real)''')
# Save (commit) the changes
self.conn.commit()
else:
self.conn = sqlite3.connect(self.data_file)
self.conn.row_factory = dict_factory
self.c = self.conn.cursor()
def get(self):
if os.path.exists(self.date_file):
f = open(self.date_file, 'r')
strDate = f.readline()
if strDate:
last_updated_date_time = datetime.datetime.strptime(strDate, "%Y-%m-%d %H:%M:%S")
current_date_time = datetime.datetime.now()
margin = datetime.timedelta(minutes=15)
if current_date_time - last_updated_date_time >= margin:
return self.refetch()
else:
return self.db()
else:
                return self.refetch()
else:
return self.refetch()
def db(self):
current_date_time = (datetime.datetime.now() - datetime.datetime(1970, 1, 1)).total_seconds()
margin = datetime.timedelta(days=self.until).total_seconds()
greater_than_date_time = current_date_time - margin
strTime = str(greater_than_date_time)
dbmodels = self.c.execute(''' SELECT * FROM quakes WHERE time >= ? ''',
(strTime, )).fetchall()
self.conn.close()
return dbmodels
def refetch(self):
dbmodels = []
db_t = []
r = requests.get(api_endpoint)
data = r.json()
features = data['features']
for feature in features:
dbmodel = dict()
geometry = feature['geometry']
properties = feature['properties']
cordinates = geometry['coordinates']
dbmodel['quake_id'] = feature['id']
dbmodel['place'] = properties['place']
dbmodel['lon'] = cordinates[0]
dbmodel['lat'] = cordinates[1]
dbmodel['mag'] = properties['mag']
dbmodel['time'] = properties['time']/1000
db_t.append((dbmodel['quake_id'], dbmodel['place'], dbmodel['time'], dbmodel['lat'], dbmodel['lon'],
dbmodel['mag']))
dbmodels.append(dbmodel)
# TODO make db call async
self.c.executemany("INSERT OR REPLACE INTO quakes VALUES (?,?,?,?,?,?)", db_t)
self.conn.commit()
f = open(self.date_file, 'w')
current_date_time = datetime.datetime.now()
f.write(current_date_time.strftime("%Y-%m-%d %H:%M:%S"))
return self.db()
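# Illustrative usage sketch: rows come back as dicts thanks to dict_factory, and
# the USGS feed is only re-fetched if the local cache is older than 15 minutes.
#   quakes = Data(7).get()   # earthquakes from the last 7 days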
| {
"repo_name": "aamir/eathquake",
"path": "data.py",
"copies": "1",
"size": "3374",
"license": "mit",
"hash": -8626345582344966000,
"line_mean": 36.9101123596,
"line_max": 127,
"alpha_frac": 0.5444576171,
"autogenerated": false,
"ratio": 3.795275590551181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48397332076511806,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aamir'
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_noop as _
from .fields import AutoCleanField
from .util import clean
def base_clean_model(**kwargs):
defaults = {
'depends_on': 'name',
'unique': True,
'truncate_symbols': True,
}
defaults.update(kwargs)
class WrapBaseCleanModel(models.Model):
clean_key = AutoCleanField(**defaults)
class Meta:
abstract = True
def clean(self):
            if defaults['unique'] is True:
value = getattr(self, defaults['depends_on'])
new = self.pk is None
qs = self._default_manager.all()
if self.pk is not None:
qs = qs.exclude(pk=self.pk)
if value and qs.filter(clean_key=clean(value, truncate_symbols=defaults['truncate_symbols']))\
.exists():
raise ValidationError(_('{0} with name "{1}" already exists.'.format(self.__class__.__name__, value)))
return WrapBaseCleanModel
BaseCleanModel = base_clean_model()
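# Illustrative usage sketch (model and field names are hypothetical):
#   class Tag(base_clean_model(depends_on='name', truncate_symbols=False)):
#       name = models.CharField(max_length=255)
# or, with the default configuration:
#   class Tag(BaseCleanModel):
#       name = models.CharField(max_length=255)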
| {
"repo_name": "intellisense/django-autoclean",
"path": "autoclean/models.py",
"copies": "1",
"size": "1174",
"license": "mit",
"hash": -6593532050342292000,
"line_mean": 29.8947368421,
"line_max": 122,
"alpha_frac": 0.5902896082,
"autogenerated": false,
"ratio": 4.2690909090909095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5359380517290909,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aamir'
from django.db.models import CharField
from django.utils.translation import ugettext_noop as _
from django.core.exceptions import FieldDoesNotExist
from django.core import checks
from .managers import AutoCleanManager
from .util import clean
class AutoCleanField(CharField):
"""
A lower case char field which is dependent on another field value
This field is helpful for unique lookup e.g. in get_or_create
"""
def __init__(self, depends_on=None, manager_name='objects', truncate_symbols=True, *args, **kwargs):
class_name = self.__class__.__name__
if 'editable' in kwargs:
            raise TypeError(_('{0} can\'t have an `editable` constraint; this is set automatically.'
                              .format(class_name)))
elif not depends_on:
            raise TypeError(_('{0} requires a keyword parameter `depends_on` naming the field to which this field is bound.'
                              .format(class_name)))
elif not isinstance(depends_on, basestring):
raise TypeError(_('{0} `depends_on` parameter value should be a string (field name).'
.format(class_name)))
self.depends_on = depends_on
self.manager_name = manager_name
self.truncate_symbols = truncate_symbols
kwargs['max_length'] = kwargs.get('max_length', 255)
kwargs['editable'] = False
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(AutoCleanField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(AutoCleanField, self).check(**kwargs)
errors.extend(self._check_depends_on_field())
return errors
def _check_depends_on_field(self):
try:
field = self.model._meta.get_field(self.depends_on)
except FieldDoesNotExist:
            error = _('{0} `depends_on` field "{1}" does not exist in model "{2}".'
.format(self.__class__.__name__, self.depends_on, self.model.__name__))
return [
checks.Error(error, hint=None, obj=self, id='AutoCleanField.E001', )
]
else:
if not isinstance(field, CharField):
error = _('{0} `depends_on` field "{1}" should be an instance of '
'models.CharField.'
.format(self.__class__.__name__, self.depends_on))
return [
checks.Error(error, hint=None, obj=self, id='AutoCleanField.E002', )
]
return []
def deconstruct(self):
name, path, args, kwargs = super(AutoCleanField, self).deconstruct()
if self.max_length != 255:
kwargs.pop('max_length')
if self.depends_on:
kwargs['depends_on'] = self.depends_on
if self.manager_name != 'objects':
kwargs['manager_name'] = self.manager_name
if self.truncate_symbols is not True:
kwargs['truncate_symbols'] = self.truncate_symbols
del kwargs['editable']
kwargs.pop('db_index', None)
return name, path, args, kwargs
def contribute_to_class(self, cls, name):
super(AutoCleanField, self).contribute_to_class(cls, name)
cls.add_to_class(self.manager_name, AutoCleanManager())
def pre_save(self, instance, add):
# auto populate
refined_value = None
value = getattr(instance, self.depends_on)
if value:
refined_value = clean(value, truncate_symbols=self.truncate_symbols)
if not self.blank:
assert refined_value, 'Not able to generate auto value for field "{0}" having value "{1}"'\
.format(self.depends_on, str(value))
# make the updated refined_value available as instance attribute
setattr(instance, self.name, refined_value)
return refined_value
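# Illustrative usage sketch (model name is hypothetical):
#   class City(models.Model):
#       name = models.CharField(max_length=255)
#       clean_key = AutoCleanField(depends_on='name')
# clean_key is filled automatically on save with a cleaned, lower-cased copy of
# name (see the class docstring above), which makes it suitable for
# case-insensitive unique lookups such as get_or_create.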
| {
"repo_name": "intellisense/django-autoclean",
"path": "autoclean/fields.py",
"copies": "1",
"size": "3982",
"license": "mit",
"hash": 2679181184261474300,
"line_mean": 36.214953271,
"line_max": 108,
"alpha_frac": 0.5894023104,
"autogenerated": false,
"ratio": 4.191578947368421,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.528098125776842,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
from models.SearchResult import SearchResultModel
from models.GroupedItem import GroupedItem
class ConditionResultModel(SearchResultModel):
def __init__(self):
self.conditionId = 'cosmic_clinvar'
def addGroupedCosmicConditions(self, groupItemTermName, groupedCondition):
# Search the dictionary for the grouping term
# if we find it we append the grouped condition
# if we don't find it we insert the grouping term and add the grouped condition
foundGroupedItem = False
for grouped_item in self.grouped_items:
if(grouped_item.groupedItemTerm == groupItemTermName):
foundGroupedItem = True
grouped_item.groupedItemsCosmic.append(groupedCondition)
if(not foundGroupedItem):
myGroupedItem = GroupedItem()
myGroupedItem.groupedItemTerm = groupItemTermName
myGroupedItem.groupedItemsCosmic.append(groupedCondition)
self.grouped_items.append(myGroupedItem)
def addGroupedClinvarConditions(self, groupItemTermName, groupedCondition):
# Search the dictionary for the grouping term
# if we find it we append the grouped condition
# if we don't find it we insert the grouping term and add the grouped condition
foundGroupedItem = False
for grouped_item in self.grouped_items:
if(grouped_item.groupedItemTerm == groupItemTermName):
foundGroupedItem = True
grouped_item.groupedItemsClinvar.append(groupedCondition)
if(not foundGroupedItem):
myGroupedItem = GroupedItem()
myGroupedItem.groupedItemTerm = groupItemTermName
myGroupedItem.groupedItemsClinvar.append(groupedCondition)
self.grouped_items.append(myGroupedItem)
def toJson(self):
return_value = SearchResultModel.toJson(self)
grouped_items = return_value['grouped_items']
grouped_items_array = []
for grouped_item in grouped_items:
grouped_items_array.append(grouped_item.toJson())
return_value['grouped_items'] = grouped_items_array
return return_value
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/models/ConditionResults.py",
"copies": "1",
"size": "2201",
"license": "mit",
"hash": 7723667087863331000,
"line_mean": 40.5283018868,
"line_max": 87,
"alpha_frac": 0.6851431168,
"autogenerated": false,
"ratio": 4.315686274509804,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5500829391309803,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
import pymongo
from bottle import Bottle, request, HTTPError, response
import app
from app import genemania
from bson.json_util import dumps
api = Bottle()
@api.put('/api/nav/project/:id/state/:state')
def put_state(id, state):
client = pymongo.MongoClient(app.mongodb_uri)
genelist = {
'id': 1,
'mygenelist': ['BRCA1','BRCA2']
}
client.nav.projects.save(genelist)
@api.get('/gene/lookup/list/:id')
def get_gene(id):
client = pymongo.MongoClient(app.mongodb_uri)
c = client.identifiers.genemania
id = genemania.lookup_id(id)
if id is None:
return HTTPError(404)
returnValue = c.find({'id': id})
pre_return_value = {result['source']: result['name'] for result in c.find({'id': id})}
return_value = dumps(pre_return_value)
print return_value
if (request.query.callback):
response.content_type = "application/javascript"
return "%s(%s);" % (request.query.callback, return_value)
return pre_return_value
#return {result['source']: result['name'] for result in c.find({'preferred': id, 'source': {'$ne': 'Synonym'}})}
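# Illustrative JSONP response when ?callback=cb is supplied (values hypothetical):
#   cb({"Gene Name": "BRCA1", "Synonym": "BRCAI"});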
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/nav/myIOSREST.py",
"copies": "1",
"size": "1156",
"license": "mit",
"hash": 6650077529675422000,
"line_mean": 22.5918367347,
"line_max": 116,
"alpha_frac": 0.6461937716,
"autogenerated": false,
"ratio": 3.274787535410765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44209813070107645,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
import pymongo
import app.MyGeneInfo
from elasticsearch import Elasticsearch
from app import elastic_search_uri
class TermAnalyzer():
def get_resolved_terms(self):
return self.terms
def process_terms_bulk(self, terms):
terms_with_id = []
#========================
# Process GENOME terms
#========================
analyzed_terms = self.process_genome_terms(terms)
#print analyzed_terms
#========================
# Process DISEASE terms
#========================
        analyzed_terms = self.process_disease_terms(analyzed_terms)
#print analyzed_terms
#print self.terms
#========================
# Process VARIANT terms
#========================
analyzed_terms = self.process_variant_terms(analyzed_terms)
#========================
# Process OTHER terms
#========================
analyzed_terms = self.process_other_terms(analyzed_terms)
#print analyzed_terms
#print self.terms
#========================
# Process UNKNOWN terms
#========================
analyzed_terms = self.process_unknown_terms(analyzed_terms)
#print analyzed_terms
#print self.terms
for term in self.terms:
a = {
'probabilitiesMap': term.probabilitiesMap,
'status': term.status,
'termId': term.termId,
'desc': term.desc,
'geneSymbol': term.geneSymbol,
'termTitle': term.termTitle
}
terms_with_id.append(a)
print len(terms_with_id)
return terms_with_id
#========================
# IDENTIFY UNKNOWN TERMS
#========================
def process_unknown_terms(self, terms):
if(len(terms) > 0):
queryTerms = terms.upper()
queryTermArray = queryTerms.split(',')
types = ['gene','icd10','drug','disease','genome']
for queryTerm in queryTermArray:
term_alt_result = self.identify_alt_term(queryTerm)
a = TermModel()
if(term_alt_result['term'] == 'UNKNOWN'):
                    a.status = 'unknown'
                    a.termId = queryTerm
self.terms.append(a)
else:
                    termDesc = ''
                    termGeneSymbol = ''
                    termTitle = queryTerm
                    term_result_types_array = []
if(term_alt_result['type'] == 'GENE'):
termDesc = term_alt_result['desc']
termGeneSymbol = term_alt_result['geneSymbol']
termTitle = queryTerm.upper() + ' (' + termGeneSymbol.upper() + ')'
a.termId = termGeneSymbol.upper()
if(term_alt_result['type'] not in term_result_types_array):
term_result_types_array.append(term_alt_result['type'])
total_found_terms = float(len(term_result_types_array))
for k in types:
if(k.upper() in term_result_types_array):
a.probabilitiesMap[k] = str(1.0/total_found_terms)
else:
a.probabilitiesMap[k] = str(0.0)
a.desc = termDesc
a.geneSymbol = termGeneSymbol
a.termTitle = termTitle
self.terms.append(a)
#========================
# IDENTIFY OTHER TERMS
#========================
def process_other_terms(self, terms):
queryTerms = terms.upper()
queryTermArray = queryTerms.split(',')
if(len(terms) > 0):
types = ['gene','icd10','drug','disease','genome']
for queryTerm in queryTermArray:
termTitle = queryTerm
a = TermModel()
a.status = 'success'
a.termId = queryTerm
a.termTitle = queryTerm
term_result = self.identify_term(queryTerm)
if(term_result is None or term_result.count() < 1):
warning_msg = 'Not found: ' + queryTerm
else:
termDesc = ''
termGeneSymbol = ''
term_result_types_array = []
for item_type in term_result:
if(item_type['type'] == 'GENE'):
if('desc' in item_type):
termDesc = item_type['desc']
if('geneSymbol' in item_type):
termGeneSymbol = item_type['geneSymbol']
if(len(queryTerm) > 12 and queryTerm[:3] == 'ENS'):
termTitle = termGeneSymbol.upper() + ' (' + queryTerm.upper() + ')'
a.termId = termGeneSymbol.upper()
if(item_type['type'] not in term_result_types_array):
term_result_types_array.append(item_type['type'])
total_found_terms = float(len(term_result_types_array))
for k in types:
if(k.upper() in term_result_types_array):
a.probabilitiesMap[k] = str(1.0/total_found_terms)
else:
a.probabilitiesMap[k] = str(0.0)
a.desc = termDesc
a.geneSymbol = termGeneSymbol
a.termTitle = termTitle
self.terms.append(a)
if(',' not in queryTerms):
if(queryTerm == queryTerms):
queryTerms = ''
else:
queryTerms = queryTerms.replace(queryTerm + ',', '').replace(',,',',')
if(len(queryTerms) > 0 and queryTerms[0:1] == ','):
queryTerms = queryTerms[1:]
if(len(queryTerms) > 0 and queryTerms[-1] == ','):
queryTerms = queryTerms[:-1]
return queryTerms
#========================
# IDENTIFY GENOME TERMS
#========================
def process_genome_terms(self, terms):
terms_uppercase = terms.upper()
for kv in self.genome_id_kv:
if(kv['k'] in terms_uppercase):
terms_uppercase = terms_uppercase.replace(kv['k'], '').replace(',,',',')
a = TermModel()
a.probabilitiesMap['genome'] = '1.0'
a.status = 'success'
a.termId = kv['v']
a.desc = 'Genome'
a.geneSymbol = kv['v']
a.termTitle = kv['v'] + ' (' + kv['k'].replace(',',' ') + ')'
self.terms.append(a)
if(len(terms_uppercase) > 0 and terms_uppercase[0:1] == ','):
terms_uppercase = terms_uppercase[1:]
if(len(terms_uppercase) > 0 and terms_uppercase[-1] == ','):
terms_uppercase = terms_uppercase[:-1]
return terms_uppercase
#=========================
# IDENTIFY DISEASE TERMS
#=========================
def process_disease_terms(self, terms):
terms_uppercase = terms.upper()
for kv in self.cancer_id_kv:
if(kv['k'] in terms_uppercase):
terms_uppercase = terms_uppercase.replace(kv['k'], '').replace(',,',',')
a = TermModel()
a.probabilitiesMap['disease'] = '1.0'
a.status = 'success'
a.termId = kv['v']
a.desc = 'Disease'
a.geneSymbol = kv['v']
a.termTitle = kv['v'] + ' (' + kv['k'].replace(',',' ') + ')'
self.terms.append(a)
if(len(terms_uppercase) > 0 and terms_uppercase[0:1] == ','):
terms_uppercase = terms_uppercase[1:]
if(len(terms_uppercase) > 0 and terms_uppercase[-1] == ','):
terms_uppercase = terms_uppercase[:-1]
return terms_uppercase
#=========================
# IDENTIFY VARIANT TERMS
#=========================
def process_variant_terms(self, terms):
return_list = ''
terms_uppercase = terms.upper()
terms_array = terms_uppercase.split(',')
for term_to_check in terms_array:
variant_identified = self.identify_variant_term(term_to_check)
if(variant_identified['term'] == 'UNKNOWN'):
return_list += term_to_check + ','
else:
a = TermModel()
a.probabilitiesMap['gene'] = '1.0'
a.status = 'success'
a.termId = variant_identified['geneSymbol']
a.desc = 'Variant'
a.geneSymbol = variant_identified['geneSymbol']
a.termTitle = variant_identified['geneSymbol'] + ' (' + term_to_check + ')'
self.terms.append(a)
b = TermModel()
b.probabilitiesMap['gene'] = '1.0'
b.status = 'success'
b.termId = term_to_check
b.desc = 'Variant'
b.geneSymbol = term_to_check
b.termTitle = term_to_check + ' (' + variant_identified['geneSymbol'] + ')'
self.terms.append(b)
if(len(return_list) > 0 and return_list[0:1] == ','):
return_list = return_list[1:]
if(len(return_list) > 0 and return_list[-1] == ','):
return_list = return_list[:-1]
return return_list
def identify_term(self, name):
client = pymongo.MongoClient()
db = client.identifiers
allterms = db.allterms2
results = allterms.find({'term': name.upper()})#,'genomeType': 'human'})
return None if results is None else results
def identify_term_partial(self, name):
client = pymongo.MongoClient()
db = client.identifiers
if(len(name) >= 5):
if(name.upper()[:5] == 'CHR1:'):
allterms = db.variants
else:
allterms = db.allterms2
#allterms = db.allterms
else:
allterms = db.allterms2
#allterms = db.allterms
results = allterms.find({'term': {'$regex': '^' + name.upper()} })
return None if results is None else results[:25] # limit the auto complete results to 25
def identify_variant_term(self, variant_term):
client = pymongo.MongoClient()
db = client.identifiers
variants_db = db.variants
results = variants_db.find_one({'term': variant_term.upper()})
if(results is not None and len(results) > 0):
return results
if(results is None or len(results) < 1):
results = {
'term': 'UNKNOWN',
'desc': 'UNKNOWN'
}
return results
def identify_alt_term(self, name):
client = pymongo.MongoClient()
db = client.identifiers
allterms = db.allterms2
gene_alt_id_array = app.MyGeneInfo.get_gene_info_by_id(name)
for gene_alt_id in gene_alt_id_array:
results = allterms.find_one({'term': gene_alt_id.upper(),'genomeType': 'human'})
if(results is not None and len(results) > 0):
return results
if(results is None or len(results) < 1):
results = {
'term': 'UNKNOWN',
'desc': 'UNKNOWN'
}
return results
def get_cancer_description_by_id(self, cancer_id):
for kv in self.cancer_id_kv_lookup:
if(kv['v'] == cancer_id):
return kv['k']
return 'Unknown type'
def get_disease_types_from_ES(self):
client = pymongo.MongoClient()
db = client.identifiers
disease_collection = db.disease_lookup
disease_types = []
search_body = {
'size': 0,
'aggs' : {
'diseases_agg' : {
'terms' : { 'field' : 'network_name', 'size': 100 }
}
}
}
result = self.es.search(
index = 'clusters',
body = search_body
)
count = 0
disease_keys = []
        if(len(result['aggregations']['diseases_agg']['buckets']) < 1):
print 'no results'
else:
for hit in result['aggregations']['diseases_agg']['buckets']:
disease_keys.append(hit['key'])
print hit['key']
for disease_key in disease_keys:
search_body = {
'size': 1,
'query' : {
'bool': {
'must': [{ 'match': {'network_name': disease_key} }]
}
}
}
            result = self.es.search(
index = 'clusters',
body = search_body
)
if(len(result) > 0):
result = result['hits']['hits'][0]['_source']
#disease_collection.save(
# {
# 'id': disease_key,
# 'desc': result['network_full_name'],
# 'synonym': disease_key
# }
#)
print result['network_full_name'] + '\t' + disease_key
return 'success'
def __init__(self):
self.es = Elasticsearch([elastic_search_uri],send_get_body_as='POST',timeout=300) # Prod Clustered Server
self.terms = []
self.genome_id_kv = [
{'k': 'CANIS,FAMILIARIS', 'v': 'DOG'},
{'k': 'DROSOPHILA,MELANOGASTER', 'v': 'FRUITFLY'},
{'k': 'HOMO,SAPIENS', 'v': 'HUMANS'},
{'k': 'MACACA,MULATTA', 'v': 'MONKEY'},
{'k': 'MUS,MUSCULUS', 'v': 'MOUSE'},
{'k': 'RATTUS,NORVEGICUS', 'v': 'RAT'},
{'k': 'CAENORHABDITIS,ELEGANS', 'v': 'WORM'},
{'k': 'DANIO,RERIO', 'v': 'ZEBRAFISH'}
]
self.cancer_id_kv_lookup = [
{'k': 'Acute Myeloid Leukemia', 'v': 'LAML'},
{'k': 'Adrenocortical carcinoma', 'v': 'ACC'},
{'k': 'Bladder Urothelial Carcinoma', 'v': 'BLCA'},
{'k': 'Brain Lower Grade Glioma', 'v': 'LGG'},
{'k': 'Breast invasive carcinoma', 'v': 'BRCA'},
{'k': 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'v': 'CESC'},
{'k': 'Cholangiocarcinoma', 'v': 'CHOL'},
{'k': 'Colon adenocarcinoma', 'v': 'COAD'},
{'k': 'Esophageal carcinoma', 'v': 'ESCA'},
{'k': 'FFPE Pilot Phase II', 'v': 'FPPP'},
{'k': 'Glioblastoma multiforme', 'v': 'GBM'},
{'k': 'Head and Neck squamous cell carcinoma', 'v': 'HNSC'},
{'k': 'Kidney Chromophobe', 'v': 'KICH'},
{'k': 'Kidney renal clear cell carcinoma', 'v': 'KIRC'},
{'k': 'Kidney renal papillary cell carcinoma', 'v': 'KIRP'},
{'k': 'Liver hepatocellular carcinoma', 'v': 'LIHC'},
{'k': 'Lung adenocarcinoma', 'v': 'LUAD'},
{'k': 'Lung squamous cell carcinoma', 'v': 'LUSC'},
{'k': 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'v': 'DLBC'},
{'k': 'Mesothelioma', 'v': 'MESO'},
{'k': 'Ovarian serous cystadenocarcinoma', 'v': 'OV'},
{'k': 'Pancreatic adenocarcinoma', 'v': 'PAAD'},
{'k': 'Pheochromocytoma and Paraganglioma', 'v': 'PCPG'},
{'k': 'Prostate adenocarcinoma', 'v': 'PRAD'},
{'k': 'Rectum adenocarcinoma', 'v': 'READ'},
{'k': 'Sarcoma', 'v': 'SARC'},
{'k': 'Skin Cutaneous Melanoma', 'v': 'SKCM'},
{'k': 'Stomach adenocarcinoma', 'v': 'STAD'},
{'k': 'Testicular Germ Cell Tumors', 'v': 'TGCT'},
{'k': 'Thymoma', 'v': 'THYM'},
{'k': 'Thyroid carcinoma', 'v': 'THCA'},
{'k': 'Uterine Carcinosarcoma', 'v': 'UCS'},
{'k': 'Uterine Corpus Endometrial Carcinoma', 'v': 'UCEC'},
{'k': 'KIPAN', 'v': 'KIPAN'},
{'k': 'COADREAD', 'v': 'COADREAD'},
{'k': 'Glioma and Glioblastoma', 'v': 'GBMLGG'},
{'k': 'Uveal Melanoma', 'v': 'UVM'}
]
self.cancer_id_kv = [
{'k': 'BLADDER,CANCER', 'v': 'BLCA'},
{'k': 'BRAIN,CANCER', 'v': 'LGG'},
{'k': 'BREAST,CANCER', 'v': 'BRCA'},
{'k': 'CERVICAL,CANCER', 'v': 'CESC'},
{'k': 'ENDOCERVICAL,CANCER', 'v': 'CESC'},
{'k': 'CERVICAL,CANCER', 'v': 'CESC'},
{'k': 'CHOLANGIOCARCINOMA', 'v': 'CHOL'},
{'k': 'BILE,DUCT,CANCER', 'v': 'CHOL'},
{'k': 'COLON,CANCER', 'v': 'COAD'},
{'k': 'ESOPHAGEAL,CANCER', 'v': 'ESCA'},
{'k': 'GLIOBLASTOMA,CANCER', 'v': 'GBM'},
{'k': 'HEAD,AND,NECK,CANCER', 'v': 'HNSC'},
{'k': 'NECK,CANCER', 'v': 'HNSC'},
{'k': 'HEAD,CANCER', 'v': 'HNSC'},
{'k': 'KIDNEY,CHROMOPHOBE', 'v': 'KICH'},
{'k': 'KIDNEY,RENAL,CLEAR,CELL,CARCINOMA', 'v': 'KIRC'},
{'k': 'KIDNEY,RENAL,PAPILLARY,CELL,CARCINOMA', 'v': 'KIRP'},
{'k': 'LIVER,CANCER', 'v': 'LIHC'},
{'k': 'LUNG,CANCER', 'v': 'LUAD'},
{'k': 'LUNG,SQUAMOUS,CELL,CARCINOMA', 'v': 'LUSC'},
{'k': 'LYMPHOID,CANCER', 'v': 'DLBC'},
{'k': 'LYMPHOMA,CANCER', 'v': 'DLBC'},
{'k': 'MESOTHELIOMA,CANCER', 'v': 'MESO'},
{'k': 'OVARIAN,CANCER', 'v': 'OV'},
{'k': 'PANCREATIC,CANCER', 'v': 'PAAD'},
{'k': 'PHEOCHROMOCYTOMA,CANCER', 'v': 'PCPG'},
{'k': 'PARAGANGLIOMA,CANCER', 'v': 'PCPG'},
{'k': 'PROSTATE,CANCER', 'v': 'PRAD'},
{'k': 'RECTUM,CANCER', 'v': 'READ'},
{'k': 'SARCOMA,CANCER', 'v': 'SARC'},
{'k': 'SKIN,CANCER', 'v': 'SKCM'},
{'k': 'STOMACH,CANCER', 'v': 'STAD'},
{'k': 'TESTICULAR,CANCER', 'v': 'TGCT'},
{'k': 'THYMOMA,CANCER', 'v': 'THYM'},
{'k': 'THYROID,CANCER', 'v': 'THCA'},
{'k': 'UTERINE,CANCER', 'v': 'UCS'},
{'k': 'GLIOMA,GLIOBLASTOMA', 'v': 'GBMLGG'},
{'k': 'UTERINE,CORPUS,ENDOMETRIAL,CANCER', 'v': 'UCEC'},
{'k': 'UVEAL,MELANOMA,CANCER', 'v': 'UVM'},
{'k': 'UVEAL,CANCER', 'v': 'UVM'},
{'k': 'LEUKEMIA', 'v': 'LAML'},
{'k': 'MYELOID,LEUKEMIA', 'v': 'LAML'},
{'k': 'ADRENOCORTICAL,CARCINOMA', 'v': 'ACC'},
{'k': 'BLADDER,UROTHELIAL,CARCINOMA', 'v': 'BLCA'},
{'k': 'BRAIN,LOWER,GRADE,GLIOMA', 'v': 'LGG'},
{'k': 'BREAST,INVASIVE,CARCINOMA', 'v': 'BRCA'},
{'k': 'CERVICAL,SQUAMOUS,CELL,CARCINOMA', 'v': 'CESC'},
{'k': 'ENDOCERVICAL,ADENOCARCINOMA', 'v': 'CESC'},
{'k': 'CHOLANGIOCARCINOMA', 'v': 'CHOL'},
{'k': 'COLON,ADENOCARCINOMA', 'v': 'COAD'},
{'k': 'ESOPHAGEAL,CARCINOMA', 'v': 'ESCA'},
{'k': 'GLIOBLASTOMA,MULTIFORME', 'v': 'GBM'},
{'k': 'HEAD,AND,NECK,SQUAMOUS,CELL,CARCINOMA', 'v': 'HNSC'},
{'k': 'KIDNEY,CHROMOPHOBE', 'v': 'KICH'},
{'k': 'KIDNEY,RENAL,CLEAR,CELL,CARCINOMA', 'v': 'KIRC'},
{'k': 'KIDNEY,RENAL,PAPILLARY,CELL,CARCINOMA', 'v': 'KIRP'},
{'k': 'LIVER,HEPATOCELLULAR,CARCINOMA', 'v': 'LIHC'},
{'k': 'LUNG,ADENOCARCINOMA', 'v': 'LUAD'},
{'k': 'LUNG,SQUAMOUS,CELL,CARCINOMA', 'v': 'LUSC'},
{'k': 'LYMPHOID,NEOPLASM,DIFFUSE,LARGE,B-CELL,LYMPHOMA', 'v': 'DLBC'},
{'k': 'MESOTHELIOMA', 'v': 'MESO'},
{'k': 'OVARIAN,SEROUS,CYSTADENOCARCINOMA', 'v': 'OV'},
{'k': 'PANCREATIC,ADENOCARCINOMA', 'v': 'PAAD'},
{'k': 'PHEOCHROMOCYTOMA', 'v': 'PCPG'},
{'k': 'PARAGANGLIOMA', 'v': 'PCPG'},
{'k': 'PROSTATE,ADENOCARCINOMA', 'v': 'PRAD'},
{'k': 'RECTUM,ADENOCARCINOMA', 'v': 'READ'},
{'k': 'SARCOMA', 'v': 'SARC'},
{'k': 'SKIN,CUTANEOUS,MELANOMA', 'v': 'SKCM'},
{'k': 'STOMACH,ADENOCARCINOMA', 'v': 'STAD'},
{'k': 'TESTICULAR,GERM,CELL,TUMORS', 'v': 'TGCT'},
{'k': 'THYMOMA', 'v': 'THYM'},
{'k': 'THYROID,CARCINOMA', 'v': 'THCA'},
{'k': 'UTERINE,CARCINOSARCOMA', 'v': 'UCS'},
{'k': 'UTERINE,CORPUS,ENDOMETRIAL,CARCINOMA', 'v': 'UCEC'},
{'k': 'UVEAL,MELANOMA', 'v': 'UVM'}
]
class TermModel():
def __init__(self):
self.probabilitiesMap = {
'gene': '0.0',
'icd10': '0.0',
'drug': '0.0',
'disease': '0.0',
'genome': '0.0'
}
self.status = 'pending'
self.termId = 'none'
self.desc = 'none'
self.geneSymbol = 'none'
self.termTitle = 'none'
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/models/TermResolver.py",
"copies": "1",
"size": "21104",
"license": "mit",
"hash": -7926714475220083000,
"line_mean": 36.0896309315,
"line_max": 113,
"alpha_frac": 0.4542740713,
"autogenerated": false,
"ratio": 3.4104718810601162,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4364745952360116,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
import pymongo
import sys
import requests
import os
import json
def populate_gene_db():
client = pymongo.MongoClient()
db = client.identifiers
genemania = db.genemania
genemania_pubmed = db.genemania_pubmed
genemania_pubmed.drop()
genemania_items = genemania.find({'source': 'Gene Name'})
loop_count = 0
for item in genemania_items:
if(loop_count < 200):
pubmed_url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=' + item['name'] + '&retmode=json&retmax=2000'
r_json = requests.get(pubmed_url).json()
if('esearchresult' in r_json):
#print r_json['esearchresult']['count']
genemania_pubmed.save(
{
'gene':item['name'],
'idlist': r_json['esearchresult']['idlist']
}
)
print item['name']
#else:
#break
print loop_count
loop_count += 1
genemania_pubmed.ensure_index([("gene" , pymongo.ASCENDING)])
genemania_pubmed.ensure_index([("idlist" , pymongo.ASCENDING)])
def read_pubmed_files():
client = pymongo.MongoClient()
db = client.identifiers
genemania_pubmed = db.genemania_pubmed
file_list = os.listdir("../pubmed_json_files")
for file_item in file_list:
found_gene_record = genemania_pubmed.find_one({'idlist': file_item.replace('.json','')})
if(found_gene_record is not None):
print found_gene_record['gene']
#print file_item
def run_sample():
client = pymongo.MongoClient()
db = client.identifiers
genemania = db.genemania # ret collection
gene_pubmed = db.gene_pubmed
gene_abstract = db.gene_abstract
genemania_items = genemania.find({'source':'Gene Name'}) # ret cursor
    gene_pubmed_items = list(gene_pubmed.find())  # materialize so it can be re-iterated for every file
count = 0
# for item in genemania_items:
# if count > 200:
# break
# pubmed_url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=%s&retmode=json&retmax=2000' % item['name']
# json_obj = requests.get(pubmed_url).json()
# validate
# if 'esearchresult' in json_obj:
# print json_obj['esearchresult']['count']
# id_list = json_obj['esearchresult']['idlist']
# print(id_list)
# gene_pubmed.save(
# {'gene':item['name'],
# 'idlist':id_list}
# )
# update document with idlist
# print item['name']
# count += 1
# create index for gene field
# gene_pubmed.ensure_index([("gene", pymongo.ASCENDING)])
# test find by index on gene field
print gene_pubmed.find_one({'gene':'NFYA'})
# read in pubmed files from directory and search in gene database
for filename in os.listdir("../pubmed_json_files/"):
filepath = "../pubmed_json_files/" + filename
try:
json_pubmed = json.loads(open(filepath).read())
        except ValueError:
            # skip files that are not valid JSON instead of aborting the whole run
            continue
filename = filename.replace('.json', "")
print filename
# look up genes in gene_pubmed database that have same pub med id
for gene in gene_pubmed_items:
print gene['gene']
if filename in gene['idlist']:
# if pub med id matches, save gene name and pub med abstract to gene_abstract database
print " - FOUND GENE: " + gene['gene']
gene_abstract.save(
                    {'gene': gene['gene'],
'abstract': json_pubmed['abstract']}
)
if __name__ == '__main__':
#populate_gene_db()
#read_pubmed_files()
run_sample()
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/genemania_pubmed.py",
"copies": "1",
"size": "3764",
"license": "mit",
"hash": -3485441007078305300,
"line_mean": 28.6377952756,
"line_max": 145,
"alpha_frac": 0.5719978746,
"autogenerated": false,
"ratio": 3.5779467680608366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46499446426608365,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
import requests
import tarfile,sys
import urllib2
import json
import time
import app.genemania
import pymongo
from itertools import islice
from bson.json_util import dumps
def lookup_id(name):
'''
:param name: gene id, symbol, or synonym to find in the name column (case insensitive)
    :return: matching gene records from the NCBI gene_info collection (or None)
'''
c = pymongo.MongoClient().datasets.nih
    results = c.find({'Symbol': name.upper()})
for result in results:
print result['description']
return None if results is None else results
def load_gene_info():
client = pymongo.MongoClient()
db = client.datasets
    # collection stores NCBI gene_info records
nih = db.nih
url = 'http://ec2-54-148-99-18.us-west-2.compute.amazonaws.com:9200/_plugin/head/gene_info_small3b.txt'
#url = 'http://ec2-54-148-99-18.us-west-2.compute.amazonaws.com:9200/_plugin/head/gene_info_small3c.txt'
#url = 'http://ec2-54-148-99-18.us-west-2.compute.amazonaws.com:9200/_plugin/head/gene_info_smallx.txt'
r = requests.get(url)
lines = r.iter_lines()
lines.next() # ignore header row
def parse(lines):
for line in lines:
#for line in lines:
try:
field1, field2, field3, field4, field5, field6, field7, field8, field9, field10, field11, field12, field13, field14, field15 = line.split('\t')
yield {
'Symbol': field3.upper(),
'GeneID': field2.upper(),
'Synonyms': field5.upper(),
'description': field9,
'type_of_gene': field10
}
except Exception as e:
print e.message
count = 0
iterator = parse(lines)
while True:
records = [record for record in islice(iterator, 1000)]
if len(records) > 0:
count += len(nih.insert_many(records).inserted_ids)
            print('inserted %d identifiers (%d total)' % (len(records), count))
else:
break
nih.create_indexes([
pymongo.IndexModel([('Symbol', pymongo.ASCENDING)]),
pymongo.IndexModel([('GeneID', pymongo.ASCENDING)])
])
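# Illustrative one-off usage (both calls hit live resources, so run manually):
#   load_gene_info()    # streams the gene_info dump into datasets.nih in batches of 1000
#   lookup_id('FOSB')   # prints the description of each matching record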
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/NihNcbi.py",
"copies": "1",
"size": "2200",
"license": "mit",
"hash": -3529697229851589600,
"line_mean": 31.3529411765,
"line_max": 159,
"alpha_frac": 0.6045454545,
"autogenerated": false,
"ratio": 3.6065573770491803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47111028315491804,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
import requests
import tarfile,sys
import urllib2
import json
import time
import pymongo
import nav.api
from itertools import islice
from elasticsearch import Elasticsearch
from bson.json_util import dumps
from collections import Counter
from app import go
from app import PubMed
#es = Elasticsearch(send_get_body_as='POST')
es = Elasticsearch(['http://ec2-52-24-205-32.us-west-2.compute.amazonaws.com:9200/'],send_get_body_as='POST') # Clustered Server
#es = Elasticsearch(['http://ec2-54-148-99-18.us-west-2.compute.amazonaws.com:9200'],send_get_body_as='POST')
#es = Elasticsearch()
def star_search_mapped_2_0(query_terms):
gene_network_data = {
'searchGroupTitle': "Star Results",
'clusterNodeName': "Lucene score",
'searchTab': "GENES",
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1'
}
unsorted_items = []
gene_super_list = []
sorted_query_list = PubMed.get_gene_pubmed_counts(query_terms)#PubMed.get_gene_pubmed_counts_normalized(query_terms, 10)
sorted_query_list_json = dumps(sorted_query_list)
boostValue = get_boost_value(sorted_query_list['results'], 'AANAT')
queryTermArray = query_terms.split(',')
should_match = [] #[{ 'match': {'networkName': network_info['cancerType']}}]
for queryTerm in queryTermArray:
#should_match.append( { 'match': {network_info['matchField']: queryTerm} })
boost_value_append = get_boost_value(sorted_query_list['results'], queryTerm)
should_match.append({"match": {"node_list.node.name":{"query": queryTerm,"boost": boost_value_append}}})
gene_network_data['geneSuperList'].append({'queryTerm': queryTerm, 'boostValue': boost_value_append})
query_terms = query_terms.replace(",", "*")
es_body = {
'fields': ['nodeName'],
'query': {
'bool': {
'should': should_match,
}
}
}
es_body_json = dumps(es_body)
result = es.search(
index='network',
doc_type='node', # node
body={
'fields': ['nodeName', 'source', 'degree', 'node_list.node.name'],
'query': {
'bool': {
'must': [{'match': {'networkName': 'BRCA'}}],
'should': should_match,
}
}
,'size': 50
}
)
should_json = dumps(should_match)
result_json = dumps(result)
print("Got %d Hits:" % result['hits']['total'])
#==================================
# PROCESS EACH SEARCH RESULT
#==================================
hitCount = 0
hitMax = 0
hitMin = 0
if(result['hits']['total'] < 1):
print 'no results'
for hit in result['hits']['hits']:
if(hitCount == 0):
hitMax = hit['_score']
else:
hitMin = hit['_score']
searchResultSummaryString = hit["fields"]["source"][0] + '-' + hit["fields"]["degree"][0]
geneNeighborhoodArray = [];
for genehit in hit["fields"]["node_list.node.name"]:
geneNameDisected = genehit.split(':')
if(len(geneNameDisected) > 1):
geneNeighborhoodArray.append(geneNameDisected[0])
else:
geneNeighborhoodArray.append(genehit)
x = [set(geneNeighborhoodArray), set(queryTermArray)]
y = set.intersection(*x)
hit_score = float(hit["_score"])
gene_network_data_items = {
#'searchResultTitle': hit["_source"]["source"] + '-' + hit["_source"]["degree"] + '-' + hit["_source"]["nodeName"],
'searchResultTitle': hit["fields"]["nodeName"][0],
'clusterName': hit["fields"]["nodeName"][0],
'searchResultSummary': searchResultSummaryString,
'luceneScore': hit["_score"],
'boostValue': get_boost_value(sorted_query_list['results'], hit["fields"]["nodeName"][0]),
'searchResultScoreRankTitle': 'lucene boosted score ',
'filterValue': '0.0000000029',
'emphasizeInfoArray': set(y),
#'emphasizeInfoArray': [],
'top5': hitCount < 5,
'hitOrder': hitCount,
'pubmedCount': 0
}
unsorted_items.append(gene_network_data_items)
hitCount += 1
gene_network_data['items'] = unsorted_items
#print('%s ' % dumps(gene_network_data))
return [gene_network_data]
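# Note: the per-term boosts used above come from normalized PubMed publication
# counts (PubMed.get_gene_pubmed_counts), so heavily published query genes weigh
# more in the Elasticsearch "should" clauses. Illustrative call (gene symbols
# are hypothetical): star_search_mapped_2_0('BRCA1,TP53')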
def get_boost_value(boostArray, idToCheck):
    for boostItem in boostArray:
        if(boostItem['id'] == idToCheck):
            return boostItem['normalizedValue']
    return 0
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/NeighborhoodSearch.py",
"copies": "1",
"size": "4801",
"license": "mit",
"hash": 5946266885547932000,
"line_mean": 31.8904109589,
"line_max": 128,
"alpha_frac": 0.5682149552,
"autogenerated": false,
"ratio": 3.7332814930015554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48014964482015554,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
import unittest
import warnings
from app import SearchPathwaysTab
from app import SearchConditionsTab
from app import SearchAuthorsTab
from app import SearchDrugsTab
from app import SearchViz
from bson.json_util import dumps
#from app.nav import job_queue
import json
class ClusterSearchTests(unittest.TestCase):
#==============================
# CLUSTER SEARCH TEST
#==============================
def test_get_cluster_search_mapped(self):
search_results = SearchPathwaysTab.get_cluster_search_mapped('GATA1,GATA2',99)
for search_result in search_results:
if(len(search_result['disease_filter_all']) < 1):
self.fail('No diseases found')
if(len(search_result['matrix_filter_all']) < 1):
self.fail('No matrix types found')
if(len(search_result['annotation_filter_all']) < 1):
self.fail('No annotations found')
for grouped_item in search_result['grouped_items']:
if(grouped_item['groupTopQValue'] < 3.5):
warnings.warn("Result has Q value lower than 3.5: " + dumps(grouped_item), Warning)
for group_member in grouped_item['group_members']:
if(len(group_member['emphasizeInfoArray']) < 1):
warnings.warn("Group result has no overlap: " + dumps(group_member), Warning)
try:
search_results = SearchPathwaysTab.get_cluster_search_mapped('INVALIDGENE1,INVALIDGENE2',99)
except Exception:
self.fail('Unrecognized query terms')
self.assertTrue(1 == 1)
#==============================
# PHENOTYPE SEARCH TEST
#==============================
def test_get_conditions_search(self):
search_results = json.loads(SearchConditionsTab.get_condition_search('DLL4,PLAC8L1,GLTP', 99))
for search_result in search_results:
self.assertEqual(search_result['searchTab'], 'PHENOTYPES')
for tissue_group in search_result['simple_disease_tissue_group']:
self.assertGreater(tissue_group['grouped_by_conditions_count'],0)
#==============================
# AUTHOR SEARCH TEST
#==============================
def test_get_authors_search(self):
search_results = SearchAuthorsTab.get_people_people_pubmed_search_mapped2('DLL4,PLAC8L1,GLTP', 1)
for search_result in search_results:
self.assertEqual(search_result['searchTab'], 'PEOPLE_GENE')
#==============================
# DRUG SEARCH TEST
#==============================
def test_get_drugs_search(self):
search_results = SearchDrugsTab.get_drug_network_search_mapped('DLL4,PLAC8L1,GLTP')
for search_result in search_results:
self.assertEqual(search_result['searchTab'], 'DRUG') # drug
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/unit_tests/Search_Tests.py",
"copies": "1",
"size": "2878",
"license": "mit",
"hash": -2571232189833258000,
"line_mean": 37.3733333333,
"line_max": 105,
"alpha_frac": 0.5889506602,
"autogenerated": false,
"ratio": 3.9751381215469612,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.506408878174696,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
from app import PubMed
from app import elastic_search_uri
from collections import Counter
from elasticsearch import Elasticsearch
from bson.json_util import dumps
import matplotlib.pyplot as plt
import matplotlib.colors as mpclrs
import pymongo
from multiprocessing import Manager, Process
import copy
import app.datascience.drug_gene_heatprop
import imp
imp.reload(app.datascience.drug_gene_heatprop)
import numpy as np
import math
import networkx as nx
import pandas as pd
es = Elasticsearch([elastic_search_uri],send_get_body_as='POST',timeout=300) # Prod Clustered Server
#TODO: Figure out how to merge/shuffle the drug results
#TODO: Figure out which of the 40 clusters are best for inferred drug searching
#============================
#============================
# DRUG SEARCH
#============================
#============================
def get_inferred_drug_search(es_id_list):
es_id_array = es_id_list.split(',')
#inferred_drugs = get_drugs_from_clusters(es_id_array) #['2010018824','2010011335'])
inferred_drugs = get_drugs_from_clusters_group_drugs(es_id_array) #['2010018824','2010011335'])
return inferred_drugs
def get_drugs_from_clusters_group_drugs(es_id_array):
should_match = []
for es_id in es_id_array:
should_match.append({"match": {"node_name": es_id}})
search_body = {
'query': {
'bool': {
'should': should_match
}
},
'size': len(es_id_array)
}
result = es.search(
index = 'groups',
doc_type = 'clusters_drugs',
body = search_body
)
if(result['hits']['total'] < 1):
print 'no results'
return_data = {
'drugs': [],
'grouped_drugs': [],
'final_grouping': []
}
for hit in result['hits']['hits']:
drug_name_array = []
for drugNodeHit in hit["_source"]["node_list"][:10]:
#check if the gene already exists
if(drugNodeHit['drug_name'] not in return_data['drugs']):
#if not add initial case
return_data['drugs'].append(drugNodeHit['drug_name'])
return_data['grouped_drugs'].append(
{
'key': drugNodeHit['drug_name'],
'drug_bank_id': drugNodeHit['drug_id'],
'genes': [drugNodeHit['gene']],
'value': [drugNodeHit], #drugNodeHit contains all the information for the drug (ids, doc_id, etc)
'gene_count': 1
}
)
else:
#if so append to the existing group
for grouped_item in return_data['grouped_drugs']:
if(drugNodeHit['drug_name'] == grouped_item['key']):
if(drugNodeHit['gene'] not in grouped_item['genes']):
grouped_item['genes'].append(drugNodeHit['gene'])
grouped_item['gene_count'] += 1
grouped_item['value'].append(drugNodeHit)
for drug_node in return_data['grouped_drugs']:
if(len(drug_node['genes']) > 0):
return_data['final_grouping'].append(drug_node)
#print dumps(return_data['final_grouping'])
#print len(return_data['final_grouping'])
return return_data['final_grouping']
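# A minimal, self-contained sketch of the grouping step performed above:
# one entry per drug name, with each new target gene appended to it. The
# sample field names mirror the node_list entries read in
# get_drugs_from_clusters_group_drugs; the helper is illustrative only and
# is not called anywhere in this module.
def _example_group_hits_by_drug(node_hits):
    """Group a flat list of {'drug_name', 'drug_id', 'gene'} dicts by drug."""
    grouped = {}
    for hit in node_hits:
        entry = grouped.setdefault(hit['drug_name'], {
            'key': hit['drug_name'],
            'drug_bank_id': hit['drug_id'],
            'genes': [],
            'value': [],
            'gene_count': 0
        })
        if hit['gene'] not in entry['genes']:
            entry['genes'].append(hit['gene'])
            entry['gene_count'] += 1
        entry['value'].append(hit)
    return list(grouped.values())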
def get_drugs_from_clusters(es_id_array):
should_match = []
for es_id in es_id_array:
should_match.append({"match": {"node_name": es_id}})
search_body = {
'query': {
'bool': {
'should': should_match
}
},
'size': 50
}
result = es.search(
index = 'groups',
doc_type = 'clusters_drugs',
body = search_body
)
if(result['hits']['total'] < 1):
print 'no results'
return_data = {
'genes': [],
'grouped_genes': []
}
for hit in result['hits']['hits']:
drug_name_array = []
for geneNodeHit in hit["_source"]["node_list"]:
#check if the gene already exists
if(geneNodeHit['gene'] not in return_data['genes']):
#if not add initial case
return_data['genes'].append(geneNodeHit['gene'])
return_data['grouped_genes'].append(
{
'key': geneNodeHit['gene'],
'drug_bank_id': geneNodeHit['drug_id'],
'drugs': [geneNodeHit['drug_name']],
'value': [geneNodeHit] #geneNodeHit contains all the information for the drug (ids, doc_id, etc)
}
)
else:
#if so append to the existing group
for grouped_item in return_data['grouped_genes']:
if(geneNodeHit['gene'] == grouped_item['key']):
if(geneNodeHit['drug_name'] not in grouped_item['drugs']):
grouped_item['drugs'].append(geneNodeHit['drug_name'])
grouped_item['value'].append(geneNodeHit)
print dumps(return_data)
return return_data
def expriment_1(seed_genes, esIds):
client = pymongo.MongoClient()
db = client.identifiers
drugbank_collection = db.drugbank
seed_genes_array = seed_genes.split(',')
es_id_array = esIds.split(',')
#seed_genes_array = ['OR2J3','AANAT','KRT80','MACC1','LOC139201','CCDC158','PLAC8L1','CLK1','GLTP','PITPNM2','TRAPPC8','EIF2S2','PNLIP','EHF','FOSB','MTMR4','USP46','CDH11','ENAH','CNOT7','STK39','CAPZA1','STIM2','DLL4','WEE1','MYO1D','TEAD3']
#es_id_array_debug = ['2020014671','2020004787','2010025212','2010007504','2010020100','2010020273']
return_value = None
return_value_array = []
#================================
# Run the heat prop in parallel
#================================
manager = Manager()
return_dict = manager.dict()
jobs = []
#for es_id in es_id_array:
# p = Process(target=get_heat_prop_from_es_id, args=(es_id, seed_genes_array, False, return_dict))
# jobs.append(p)
# p.start()
#for proc in jobs:
# proc.join()
#inferred_drug_group_array = return_dict.values()
for es_id in es_id_array:
jobs.append(get_heat_prop_from_es_id(es_id, seed_genes_array, False, {}))
inferred_drug_group_array = jobs
print dumps(jobs)
merged_by_rank = []
for a in inferred_drug_group_array:
for b in a['inferred_drugs']:
found_match = False
for c in merged_by_rank:
if(c['drug_bank_id'] == b['drug_bank_id']):
disease_type_found = False
for d in c['diseases_with_rank']:
if(d['disease'] == b['disease_type']):
disease_type_found = True
if(not disease_type_found):
c['diseases_with_rank'].append(
{
'disease': b['disease_type'],
'rank': b['heat_rank']
}
)
found_match = True
if(not found_match):
b1 = copy.deepcopy(b)
b1['diseases_with_rank'] = [
{
'disease': b1['disease_type'],
'rank': b1['heat_rank']
}
]
merged_by_rank.append(b1)
return_value_array = {'inferred_drugs': merged_by_rank} #return_dict.values()
return return_value_array
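# Hedged sketch of the parallel fan-out described by the commented-out block
# above (one Process per es_id writing its result into a shared Manager dict).
# It is not stated why the serial loop was kept, so treat this as an
# illustration of the intended pattern rather than the production path.
def _example_parallel_heat_prop(es_id_array, seed_genes_array):
    manager = Manager()
    return_dict = manager.dict()
    jobs = []
    for es_id in es_id_array:
        p = Process(target=get_heat_prop_from_es_id,
                    args=(es_id, seed_genes_array, False, return_dict))
        jobs.append(p)
        p.start()
    for p in jobs:
        p.join()
    return return_dict.values()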
def get_heat_prop_from_es_id(es_id, seed_genes_array, include_evidence_graph=False, return_dict=None):
client = pymongo.MongoClient()
db = client.identifiers
drugbank_collection = db.drugbank
print es_id
disease_type = get_cluster_disease_by_es_id(es_id)
cluster_data = load_x_y_z_cluster_data(es_id)
cluster_x_y_z = cluster_data['cluster']
gene_drug_df = app.datascience.drug_gene_heatprop.drug_gene_heatprop(seed_genes_array,cluster_x_y_z,plot_flag=False)
gene_drug_json = gene_drug_df.reset_index().to_dict(orient='index')
#print dumps(gene_drug_json)
one_gene_many_drugs = []
hot_genes = []
hot_genes_with_heat_value = []
hot_genes_values = []
for key, value in gene_drug_json.iteritems():
if(len(value['drugs']) > 0 and value['index'] not in seed_genes_array):
if(value['heat_value'] > 0.00001):
one_gene_many_drugs.append({
'gene': value['index'],
'drugs': value['drugs'],
'heat_value': float("{0:f}".format(value['heat_value'])),
'heat_rank': value['heat_rank']
})
if(value['heat_value'] > 0.00001):
hot_genes.append(value['index'])
drugs_array_desc = []
drugs_for_pop_up = ''
node_info_for_pop_up = '<span style="font-weight: bold; margin-bottom: 5px;">Drugs associated with ' + value['index'] + ':</span><br><div style="margin-left: 10px;">'
for drug_id in value['drugs']:
drugs_array_desc.append(get_drugbank_name(drug_id, drugbank_collection))
drugs_for_pop_up += get_drugbank_name(drug_id, drugbank_collection) + '\n'
node_info_for_pop_up += get_drugbank_name(drug_id, drugbank_collection) + '<br>'
node_info_for_pop_up += '</div>'
#print 'gene_id: ' + value['index'] + ' heat: ' + str(value['heat_value'] * 100000)
if(value['index'] not in seed_genes_array):
hot_genes_values.append(value['heat_value'] * 100000)
hot_genes_with_heat_value.append(
{
'gene_id': value['index'],
'heat_value': value['heat_value'] * 100000,
'drugable': len(drugs_for_pop_up) > 0,
'seed_gene': False,
'drugs': value['index'] + '\nDrugs targeting this gene:\n\n' + drugs_for_pop_up,
'node_info': value['index'],
'pop_up_info': node_info_for_pop_up
}
)
else:
hot_genes_with_heat_value.append(
{
'gene_id': value['index'],
'heat_value': value['heat_value'] * 100000,
'drugable': len(drugs_for_pop_up) > 0,
'seed_gene': True,
'drugs': value['index'] + '\n[SEED GENE]\nFor drugs that are directly \ntargeting this query gene \nsee results above', # This is a seed gene; we show only inferred drugs here, not direct ones.
'node_info': value['index'],
'pop_up_info': node_info_for_pop_up
}
)
else:
hot_genes_with_heat_value.append(
{
'gene_id': value['index'],
'seed_gene': False, # seed genes are intrinsically hot
'heat_value': 0.0
}
)
if(len(hot_genes_values) < 1):
max_hot_genes_value = 0
else:
max_hot_genes_value = max(hot_genes_values)
#========================================
# Results are one to many with drugname
# as the key. We need to group by gene
#========================================
if(len(one_gene_many_drugs) < 2):
return None
else:
one_drug_many_genes = []
for gene_drugs in one_gene_many_drugs:
found_drug = False
for drug in gene_drugs['drugs']:
for match_this_drug in one_drug_many_genes:
if(drug == match_this_drug['drug_bank_id']):
drug_bank_desc = get_drugbank_name(drug, drugbank_collection)
match_this_drug['genes'].append(gene_drugs['gene'])
max_heat_map_value = match_this_drug['heat_value']
if(gene_drugs['heat_value'] > max_heat_map_value):
max_heat_map_value = gene_drugs['heat_value']
max_heat_rank_value = match_this_drug['heat_rank']
if(gene_drugs['heat_rank'] > max_heat_rank_value):
max_heat_rank_value = gene_drugs['heat_rank']
match_this_drug['gene_count'] += 1
match_this_drug['heat_value'] = max_heat_map_value
match_this_drug['heat_rank'] = max_heat_rank_value
match_this_drug['value'].append({
'drug_name': drug_bank_desc,
'gene': gene_drugs['gene'],
'doc_id': '0',
'drug_id': drug
})
found_drug = True
break
if(not found_drug):
drug_bank_desc = get_drugbank_name(drug, drugbank_collection)
one_drug_many_genes.append({
'drug_bank_id': drug,
'heat_value': float("{0:f}".format(gene_drugs['heat_value'])),
'heat_rank': gene_drugs['heat_rank'],
'genes': [gene_drugs['gene']],
'value': [
{
'drug_name': drug_bank_desc,
'gene': gene_drugs['gene'],
'doc_id': '0',
'drug_id': drug
}
],
'key': drug_bank_desc,
'gene_count': 1,
'disease_type': disease_type
})
top_drugs_sorted_list = one_drug_many_genes #sorted(one_drug_many_genes, key=lambda k: k['gene_count'])
H = cluster_x_y_z.subgraph(hot_genes)
nodes = H.nodes()
numnodes = len(nodes)
edges=H.edges(data=True)
numedges = len(edges)
print 'Edges len: ' + str(len(edges))
color_list = plt.cm.OrRd(np.linspace(0, 1, 100))
inferred_drug_graph = {
'directed':False,
'nodes':[],
'links':[]
}
if(include_evidence_graph):
nodes_dict = []
log_norm_value = mpclrs.LogNorm(0, max_hot_genes_value, clip=False)
for n in nodes:
found_hot_gene = False
for hv in hot_genes_with_heat_value:
if(hv['gene_id'] == n):
color_list_index = math.ceil((hv['heat_value']/max_hot_genes_value) * 100) - 1
font_color = 'black'
if(color_list_index > 99):
color_list_index = 99
if(color_list_index > 40):
font_color = 'white'
if(hv['drugable']):
nodes_dict.append({"id":n,"com":0, "node_type": "DRUGABLE", "drugs": hv['drugs'], 'node_info': hv['node_info'], 'pop_up_info': hv['pop_up_info'], 'seed_gene': hv['seed_gene'], 'font_color': font_color, "degree":H.degree(n),"rfrac":int(color_list[color_list_index][0] * 255),"gfrac":int(color_list[color_list_index][1] * 255),"bfrac":int(color_list[color_list_index][2] * 255)} )
else:
nodes_dict.append({"id":n,"com":0, "node_type": "NORMAL", "drugs": [], 'node_info': hv['node_info'], 'pop_up_info': 'No drugs available for this gene', 'seed_gene': hv['seed_gene'], 'font_color': font_color, "degree":H.degree(n),"rfrac":int(color_list[color_list_index][0] * 255),"gfrac":int(color_list[color_list_index][1] * 255),"bfrac":int(color_list[color_list_index][2] * 255)} )
found_hot_gene = True
break
if(not found_hot_gene):
nodes_dict.append({"id":n,"com":0,"degree":H.degree(n),"rfrac":color_list[0][0] * 255,"gfrac":color_list[0][1] * 255,"bfrac":color_list[0][2] * 255} )
node_map = dict(zip(nodes,range(numnodes))) # map to indices for source/target in edges
edges_dict = [{"source":node_map[edges[i][0]], "target":node_map[edges[i][1]], "weight":edges[i][2]['weight']} for i in range(numedges)]
inferred_drug_graph = {
'directed':False,
'nodes':nodes_dict,
'links':edges_dict
}
if(return_dict is not None):
return_dict[es_id] = es_id
if(top_drugs_sorted_list is not None):
if(len(top_drugs_sorted_list) >= 25):
if(return_dict is not None):
return_dict[es_id] = {'inferred_drugs': top_drugs_sorted_list[:24], 'evidence_graph': inferred_drug_graph, 'disease_type': disease_type}
return {'inferred_drugs': top_drugs_sorted_list[:24], 'evidence_graph': inferred_drug_graph, 'disease_type': disease_type}
else:
if(return_dict is not None):
return_dict[es_id] = {'inferred_drugs': top_drugs_sorted_list, 'evidence_graph': inferred_drug_graph, 'disease_type': disease_type}
return {'inferred_drugs': top_drugs_sorted_list, 'evidence_graph': inferred_drug_graph, 'disease_type': disease_type}
else:
if(return_dict is not None):
return_dict[es_id] = {'inferred_drugs': [{'nodes': [], 'edges':[]}], 'evidence_graph': {'directed':False,'nodes':[],'links':[]}, 'disease_type': disease_type}
return {'inferred_drugs': [{'nodes': [], 'edges':[]}], 'evidence_graph': {'directed':False,'nodes':[],'links':[]}, 'disease_type': disease_type}
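# Small sketch of the heat-to-color mapping used when building the evidence
# graph above: a gene's heat value is scaled against the hottest non-seed
# gene and turned into an index into a 100-step OrRd colormap (clamped to the
# valid range), with white text once the fill gets dark. Assumes
# max_heat_value > 0; relies on the matplotlib/numpy/math imports at the top
# of this module. Illustrative only.
def _example_heat_to_rgb(heat_value, max_heat_value):
    color_list = plt.cm.OrRd(np.linspace(0, 1, 100))
    index = int(math.ceil((float(heat_value) / max_heat_value) * 100) - 1)
    index = min(max(index, 0), 99)  # clamp into the colormap range
    font_color = 'white' if index > 40 else 'black'
    r, g, b = [int(c * 255) for c in color_list[index][:3]]
    return {'rfrac': r, 'gfrac': g, 'bfrac': b, 'font_color': font_color}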
def get_drugbank_name(db_id, drugbank_mongo_collection):
drug_bank_found = drugbank_mongo_collection.find_one({'drug_bank_id': db_id})
drug_bank_desc = ''
if(drug_bank_found is not None):
drug_bank_desc = drug_bank_found['drug_desc']
else:
drug_bank_desc = db_id
return drug_bank_desc
def get_cluster_disease_by_es_id(es_id):
search_body = {
'fields': [
'network_full_name'
],
'query': {
'bool': {
'must': [
{ 'match': {'_id': es_id} }
]
}
}
}
result = es.search(
index = 'clusters',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
if(len(result['hits']['hits']) > 0):
hit = result['hits']['hits'][0]
if('network_full_name' in hit["fields"]):
if(len(hit["fields"]["network_full_name"]) > 0):
return hit["fields"]["network_full_name"][0]
else:
return 'unknown'
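# The ElasticSearch "fields"-style responses used in this module may omit a
# field entirely or return it as an empty list, which is why every access
# above is guarded. A tiny helper capturing that defensive pattern; it is
# hypothetical and unused, shown only to make the repeated idiom explicit.
def _example_first_field(hit_fields, field_name, default='unknown'):
    values = hit_fields.get(field_name, [])
    return values[0] if values else default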
def load_x_y_z_cluster_data(esId):
# results = get_cluster_document_from_elastic_by_id3('2020035052')
xValues = []
yValues = []
cluster_info = ''
search_body = {
'query': {
'bool': {
'must': [
{'match': {
'_id': esId
}}
]
}
}
}
results = es.search(
index = 'clusters',
body = search_body
)
if(len(results) > 0):
result = results['hits']['hits'][0]['_source']
cluster_info = result['network_full_name']
for x_label in result['x_node_list']:
xValues.append(x_label['name'])
for y_label in result['y_node_list']:
yValues.append(y_label['name'])
data = []
for correlation_record in result['correlation_matrix']:
correlation_value = correlation_record['correlation_value']
data.append([correlation_record['x_loc'], correlation_record['y_loc'], correlation_value])
array = np.zeros((len(result['x_node_list']), len(result['y_node_list'])))
for row, col, val in data:
index = (row, col)
array[index] = val
zValues = array
sample_mat = pd.DataFrame(data=zValues, # values
index=yValues, # 1st column as index
columns=xValues) # 1st row as the column names
numrows = len(sample_mat)
numcols = len(sample_mat.columns)
# check if the matrix is already square (symmetric case)
if numrows==numcols:
idx_to_node = dict(zip(range(len(sample_mat)),list(sample_mat.index)))
sample_mat = np.array(sample_mat)
sample_mat = sample_mat[::-1,::-1] # reverse the indices for use in graph creation
else:
zmat = np.array(sample_mat)
zmat = zmat[::-1,0:-1] # reverse the indices for use in graph creation
xlist = list(sample_mat.index)
ylist = list(sample_mat.columns)
zsym,xsym,ysym = symmetrize_matrix(zmat,xlist,ylist)
sample_mat = zsym
idx_to_node = dict(zip(range(len(sample_mat)),xlist))
#G_cluster = nx.Graph()
G_cluster = nx.from_numpy_matrix(np.abs(sample_mat))
G_cluster = nx.relabel_nodes(G_cluster,idx_to_node)
return {'cluster': G_cluster, 'cluster_info': cluster_info}
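# Minimal sketch of how the sparse correlation records above become a dense
# matrix and then a weighted, undirected NetworkX graph. The gene names and
# correlation values are hypothetical; nx.from_numpy_matrix mirrors the call
# used above (newer networkx releases expose it as from_numpy_array).
def _example_records_to_graph():
    x_names = ['DLL4', 'GLTP']
    y_names = ['PLAC8L1', 'WEE1']
    records = [
        {'x_loc': 0, 'y_loc': 0, 'correlation_value': 0.8},
        {'x_loc': 1, 'y_loc': 1, 'correlation_value': -0.4},
    ]
    mat = np.zeros((len(x_names), len(y_names)))
    for rec in records:
        mat[rec['x_loc'], rec['y_loc']] = rec['correlation_value']
    frame = pd.DataFrame(mat, index=y_names, columns=x_names)
    graph = nx.from_numpy_matrix(np.abs(np.array(frame)))
    return nx.relabel_nodes(graph, dict(zip(range(len(frame)), list(frame.index))))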
def symmetrize_matrix(zmat,xlist,ylist):
'''
Simple helper function to symmetrize an asymmetric matrix
inputs:
- zmat: a 2d matrix (dimensions r x c)
- xlist: ordered list of row names (length r)
- ylist: ordered list of column names (length c)
outputs:
- zsym: symmetrized matrix (dimensions (r+c) x (r+c))
- xsym: [xlist, ylist]
- ysym: [xlist, ylist] (Note: ysym = xsym)
'''
numrows,numcols = zmat.shape
# initialize the symmetric version of zmat
zsym = np.zeros((numrows+numcols,numrows+numcols))
# fill diagonal with 1s
np.fill_diagonal(zsym,1)
zsym[0:numrows,numrows:]=zmat
zsym[numrows:,0:numrows]=np.transpose(zmat)
xsym = []
xsym.extend(xlist)
xsym.extend(ylist)
ysym = []
ysym.extend(xlist)
ysym.extend(ylist)
return zsym,xsym,ysym
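# Small usage sketch for symmetrize_matrix with a hypothetical 2 x 3 block:
# the result is a (2+3) x (2+3) matrix with zmat in the upper-right block,
# its transpose in the lower-left block, and 1s on the diagonal.
def _example_symmetrize_usage():
    zmat = np.array([[0.2, 0.0, 0.7],
                     [0.5, 0.1, 0.0]])
    zsym, xsym, ysym = symmetrize_matrix(zmat, ['geneA', 'geneB'],
                                         ['drug1', 'drug2', 'drug3'])
    assert zsym.shape == (5, 5)
    assert xsym == ['geneA', 'geneB', 'drug1', 'drug2', 'drug3']
    return zsym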
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/SearchInferredDrugsTab.py",
"copies": "1",
"size": "22412",
"license": "mit",
"hash": -9033547962789414000,
"line_mean": 36.986440678,
"line_max": 412,
"alpha_frac": 0.5054881314,
"autogenerated": false,
"ratio": 3.5557670950341107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9520884400965481,
"avg_score": 0.008074165093725891,
"num_lines": 590
} |
__author__ = 'aarongary'
from app import PubMed
from app import elastic_search_uri
from collections import Counter
from elasticsearch import Elasticsearch
es = Elasticsearch([elastic_search_uri],send_get_body_as='POST',timeout=300)
#============================
#============================
# DRUG SEARCH
#============================
#============================
def get_drug_network_search_mapped(queryTerms):
network_information = {
'searchGroupTitle': 'Cluster Network',
'searchTab': 'DRUG',
'network': 'drug_network',
'matchField': 'x_node_list.name',
'matchCoreNode': 'node_name',
'cancerType': 'BRCA',
'queryTerms': queryTerms
}
phenotype_network_data = drug_network_search_mapped(network_information)
#phenotype_network_data = drug_network_search_gene_centric(network_information)
return [phenotype_network_data]
def drug_network_search_mapped(network_info, disease=[]):
gene_network_data = {
'searchGroupTitle': network_info['searchGroupTitle'],
'clusterNodeName': "",
'searchTab': network_info['searchTab'],
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1',
'document_ids': [],
'inferred_drugs': [],
'overlap_counts': []
}
unsorted_items = []
gene_super_list = []
overlap_counts_array = []
overlap_found = False
queryTermArray = network_info['queryTerms'].split(',')
sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
gene_network_data['geneSuperList'] = get_geneSuperList(queryTermArray, sorted_query_list)
network_info['queryTerms'] = network_info['queryTerms'].replace(",", "*")
should_match = []
for queryTerm in queryTermArray:
boost_value_append = get_boost_value(sorted_query_list['results'], queryTerm)
should_match.append({"match": {"node_list.name": queryTerm}})
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'should': should_match
}
},
'size': 50
}
result = es.search(
index = 'drugs',
doc_type = 'drugs_drugbank',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
#==================================
# PROCESS EACH SEARCH RESULT
#==================================
hitCount = 0
hitMax = 0
hitMin = 0
if(result['hits']['total'] < 1):
print 'no results'
for hit in result['hits']['hits']:
if(hitCount == 0):
hitMax = hit['_score']
else:
hitMin = hit['_score']
geneNeighborhoodArray = []
scoreRankCutoff = 0.039
node_list_name_and_weight = []
for geneNodeHit in hit["_source"]["node_list"]:
geneNeighborhoodArray.append(geneNodeHit['name'])
x = [set(geneNeighborhoodArray), set(queryTermArray)]
y = set.intersection(*x)
emphasizeInfoArrayWithWeights = []
for genehit in y:
node_list_items = hit["_source"]["node_list"]
match = (item for item in node_list_items if item["name"] == genehit).next()
emphasizeInfoArrayWithWeights.append(match)
for gene_network_matched in y:
gene_super_list.append(gene_network_matched)
for match_this_overlap in overlap_counts_array:
if(gene_network_matched == match_this_overlap['gene']):
match_this_overlap['count'] += 1
overlap_found = True
break
if(not overlap_found):
overlap_counts_array.append(
{
'gene': gene_network_matched,
'count': 1
}
)
searchResultSummaryString = 'drugbank-' + str(hit["_source"]["degree"])
#searchResultSummaryString = hit["_source"]["source"] + '-' + str(hit["_source"]["total_degree"])
hit_score = float(hit["_score"])
gene_network_data_items = {
'searchResultTitle': hit["_source"]["node_name"], #DrugBank.get_drugbank_synonym(hit["_source"]["node_name"]),
'diseaseType': '',
'clusterName': hit["_source"]["drugbank_id"],
'searchResultSummary': searchResultSummaryString,
'searchResultScoreRank': hit["_score"],
'luceneScore': hit["_score"],
'searchResultScoreRankTitle': 'pubmed references ',
'filterValue': '0.0000000029',
'emphasizeInfoArray': list(y),
'emphasizeInfoArrayWithWeights': emphasizeInfoArrayWithWeights,
'top5': hitCount < 5,
'hitOrder': hitCount,
'pubmedCount': 0,
'hit_id': hit['_id']
}
gene_network_data['document_ids'].append(hit['_id'])
unsorted_items.append(gene_network_data_items)
hitCount += 1
print hitCount
foundHit = False
for network_data_item in unsorted_items:
foundHit = False
for sortedID in sorted_query_list['results']:
if sortedID['id'] == network_data_item['clusterName']:
network_data_item['pubmedCount'] = sortedID['count']
network_data_item['searchResultScoreRank'] = sortedID['normalizedValue']
gene_network_data['items'].append(network_data_item)
foundHit = True
if(not foundHit):
network_data_item['pubmedCount'] = 0
network_data_item['searchResultScoreRank'] = 0
gene_network_data['items'].append(network_data_item)
counter_gene_list = Counter(gene_super_list)
for key, value in counter_gene_list.iteritems():
kv_item = {'queryTerm': key,
'boostValue': value}
#gene_network_data['geneSuperList'].append(kv_item)
#===============================
# GROUP DRUGS BY TARGETED GENE
#===============================
drug_gene_grouping = []
for drug_hit in gene_network_data['items']:
match_found = False
# After the first item has been added, append to the existing group
for gene_loop_item in drug_gene_grouping:
if(len(drug_hit['emphasizeInfoArray']) > 0):
if(gene_loop_item['gene_name'] == drug_hit['emphasizeInfoArray'][0]):
gene_loop_item['searchResultTitle'].append({'drug_name': drug_hit['searchResultTitle'],
'drugbank_id': drug_hit['clusterName']})
match_found = True
# First item added
if(not match_found):
if(len(drug_hit['emphasizeInfoArray']) > 0):
drug_gene_grouping.append(
{
'gene_name': drug_hit['emphasizeInfoArray'][0],
'searchResultTitle': [{'drug_name': drug_hit['searchResultTitle'],
'drugbank_id': drug_hit['clusterName']}]
}
)
else:
drug_gene_grouping.append(
{
'gene_name': 'unknown',
'searchResultTitle': [{'drug_name': drug_hit['searchResultTitle'],
'drugbank_id': drug_hit['clusterName']}]
}
)
for drug_gene_no_count_item in drug_gene_grouping:
drug_gene_no_count_item['gene_count'] = len(drug_gene_no_count_item['searchResultTitle'])
#drug_gene_dumped = dumps(drug_gene_grouping)
gene_network_data['grouped_items'] = drug_gene_grouping
gene_network_data['overlap_counts'] = overlap_counts_array
return gene_network_data
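# Minimal sketch of the overlap step used above: intersect the query terms
# with the names in a hit's node_list and pull back the matching node
# records. It replaces the generator-plus-.next() idiom above with a plain
# list comprehension; the input records are hypothetical and the helper is
# not called anywhere.
def _example_query_overlap(query_terms, node_list):
    names = set(node['name'] for node in node_list)
    overlap = names & set(query_terms)
    matched_nodes = [node for node in node_list if node['name'] in overlap]
    return list(overlap), matched_nodes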
def drug_network_search_gene_centric(network_info, disease=[]):
gene_network_data = {
'searchGroupTitle': network_info['searchGroupTitle'],
'clusterNodeName': "",
'searchTab': network_info['searchTab'],
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1',
'document_ids': [],
'inferred_drugs': []
}
unsorted_items = []
gene_super_list = []
queryTermArray = network_info['queryTerms'].split(',')
sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
gene_network_data['geneSuperList'] = get_geneSuperList(queryTermArray, sorted_query_list)
network_info['queryTerms'] = network_info['queryTerms'].replace(",", "*")
should_match = []
for queryTerm in queryTermArray:
boost_value_append = get_boost_value(sorted_query_list['results'], queryTerm)
should_match.append({"match": {"node_list.name": queryTerm}})
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'should': should_match
}
},
'size': 50
}
result = es.search(
index = 'drugs',
doc_type = 'drugs_drugbank',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
#==================================
# PROCESS EACH SEARCH RESULT
#==================================
hitCount = 0
hitMax = 0
hitMin = 0
if(result['hits']['total'] < 1):
print 'no results'
search_results_for_grouping = []
for hit in result['hits']['hits']:
geneNeighborhoodArray = []
for geneNodeHit in hit["_source"]["node_list"]:
geneNeighborhoodArray.append(geneNodeHit['name'])
x = [set(geneNeighborhoodArray), set(queryTermArray)]
y = set.intersection(*x)
emphasizeInfoArrayWithWeights = []
for genehit in y:
search_results_for_grouping.append(
{
'drugbank_id': hit['_source']['drugbank_id'],
'drug_name': hit['_source']['node_name'],
'gene_overlap': genehit,
'hit_id': hit['_id']
}
)
node_list_items = hit["_source"]["node_list"]
match = (item for item in node_list_items if item["name"] == genehit).next()
emphasizeInfoArrayWithWeights.append(match)
for gene_network_matched in y:
gene_super_list.append(gene_network_matched)
searchResultSummaryString = 'drugbank-' + str(hit["_source"]["degree"])
#searchResultSummaryString = hit["_source"]["source"] + '-' + str(hit["_source"]["total_degree"])
hit_score = float(hit["_score"])
gene_network_data_items = {
'searchResultTitle': hit["_source"]["node_name"], #DrugBank.get_drugbank_synonym(hit["_source"]["node_name"]),
'diseaseType': '',
'clusterName': hit["_source"]["drugbank_id"],
'searchResultSummary': searchResultSummaryString,
'searchResultScoreRank': hit["_score"],
'luceneScore': hit["_score"],
'searchResultScoreRankTitle': 'pubmed references ',
'filterValue': '0.0000000029',
'emphasizeInfoArray': list(y),
'emphasizeInfoArrayWithWeights': emphasizeInfoArrayWithWeights,
'top5': hitCount < 5,
'hitOrder': hitCount,
'pubmedCount': 0,
'hit_id': hit['_id']
}
gene_network_data['document_ids'].append(hit['_id'])
unsorted_items.append(gene_network_data_items)
hitCount += 1
print hitCount
foundHit = False
for network_data_item in unsorted_items:
foundHit = False
for sortedID in sorted_query_list['results']:
if sortedID['id'] == network_data_item['clusterName']:
network_data_item['pubmedCount'] = sortedID['count']
network_data_item['searchResultScoreRank'] = sortedID['normalizedValue']
gene_network_data['items'].append(network_data_item)
foundHit = True
if(not foundHit):
network_data_item['pubmedCount'] = 0
network_data_item['searchResultScoreRank'] = 0
gene_network_data['items'].append(network_data_item)
counter_gene_list = Counter(gene_super_list)
for key, value in counter_gene_list.iteritems():
kv_item = {'queryTerm': key,
'boostValue': value}
#gene_network_data['geneSuperList'].append(kv_item)
#===============================
# GROUP DRUGS BY TARGETED GENE
#===============================
drug_gene_grouping = []
for drug_hit in gene_network_data['items']:
match_found = False
# After the first item has been added, append to the existing group
for gene_loop_item in drug_gene_grouping:
if(len(drug_hit['emphasizeInfoArray']) > 0):
if(gene_loop_item['gene_name'] == drug_hit['emphasizeInfoArray'][0]):
gene_loop_item['searchResultTitle'].append({'drug_name': drug_hit['searchResultTitle'],
'drugbank_id': drug_hit['clusterName']})
match_found = True
# First item added
if(not match_found):
if(len(drug_hit['emphasizeInfoArray']) > 0):
drug_gene_grouping.append(
{
'gene_name': drug_hit['emphasizeInfoArray'][0],
'searchResultTitle': [{'drug_name': drug_hit['searchResultTitle'],
'drugbank_id': drug_hit['clusterName']}]
}
)
else:
drug_gene_grouping.append(
{
'gene_name': 'unknown',
'searchResultTitle': [{'drug_name': drug_hit['searchResultTitle'],
'drugbank_id': drug_hit['clusterName']}]
}
)
for drug_gene_no_count_item in drug_gene_grouping:
drug_gene_no_count_item['gene_count'] = len(drug_gene_no_count_item['searchResultTitle'])
#drug_gene_dumped = dumps(drug_gene_grouping)
gene_network_data['grouped_items'] = drug_gene_grouping
return gene_network_data
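# Hedged sketch of the "group drugs by targeted gene" step shared by the two
# search functions above: each drug hit is filed under the first gene it
# overlaps with, or under 'unknown' when there is no overlap. The input
# records are hypothetical; only the keys actually read above are assumed.
def _example_group_drugs_by_gene(drug_hits):
    groups = {}
    for hit in drug_hits:
        gene = hit['emphasizeInfoArray'][0] if hit['emphasizeInfoArray'] else 'unknown'
        entry = groups.setdefault(gene, {'gene_name': gene, 'searchResultTitle': []})
        entry['searchResultTitle'].append({'drug_name': hit['searchResultTitle'],
                                           'drugbank_id': hit['clusterName']})
    for entry in groups.values():
        entry['gene_count'] = len(entry['searchResultTitle'])
    return list(groups.values())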
def get_geneSuperList(queryTermArray, sorted_query_list):
returnValue = []
#sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
for queryTerm in queryTermArray:
#should_match.append( { 'match': {network_info['matchField']: queryTerm} })
boost_value_append = get_boost_value(sorted_query_list['results'], queryTerm)
#should_match.append({"match": {"node_list.node.name":{"query": queryTerm,"boost": boost_value_append}}})
returnValue.append({'queryTerm': queryTerm, 'boostValue': boost_value_append})
return returnValue
def get_boost_value(boostArray, idToCheck):
for boostItem in boostArray:
if(boostItem['id'] == idToCheck):
returnThisValue = boostItem['normalizedValue']
return boostItem['normalizedValue']
return 0
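# Usage sketch for get_boost_value and get_geneSuperList with a hypothetical
# normalized PubMed count list; the {'id', 'count', 'normalizedValue'} shape
# matches how sorted_query_list['results'] is consumed above, but the values
# themselves are made up.
def _example_boost_lookup():
    sorted_query_list = {'results': [
        {'id': 'DLL4', 'count': 120, 'normalizedValue': 9.5},
        {'id': 'GLTP', 'count': 3, 'normalizedValue': 0.2},
    ]}
    assert get_boost_value(sorted_query_list['results'], 'DLL4') == 9.5
    assert get_boost_value(sorted_query_list['results'], 'PLAC8L1') == 0
    return get_geneSuperList(['DLL4', 'GLTP', 'PLAC8L1'], sorted_query_list)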
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/SearchDrugsTab.py",
"copies": "1",
"size": "15395",
"license": "mit",
"hash": 4439462251718326000,
"line_mean": 32.7609649123,
"line_max": 122,
"alpha_frac": 0.5412146801,
"autogenerated": false,
"ratio": 3.9575835475578405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.997777736027149,
"avg_score": 0.0042041734772700025,
"num_lines": 456
} |
__author__ = 'aarongary'
from collections import Counter
from app import PubMed
from app import elastic_search_uri
from app import util
from models.TermResolver import TermAnalyzer
from elasticsearch import Elasticsearch
import numpy as np
import networkx as nx
import pandas as pd
import community
import matplotlib.pyplot as plt
import pymongo
from io import StringIO
from pandas import DataFrame
import hashlib
from bson.json_util import dumps
#from RestBroker import start_thumbnail_generator
from app import SearchViz
from itertools import groupby
import difflib
import time
from elasticsearch_dsl import Search, Q
es = Elasticsearch([elastic_search_uri],send_get_body_as='POST',timeout=300) # Prod Clustered Server
#es = Elasticsearch(['http://ec2-52-27-59-174.us-west-2.compute.amazonaws.com:9200/'],send_get_body_as='POST',timeout=300) # Prod Clustered Server
#es = Elasticsearch(['http://ec2-52-24-205-32.us-west-2.compute.amazonaws.com:9200/'],send_get_body_as='POST') # PROD
#es = Elasticsearch(['http://ec2-52-32-253-172.us-west-2.compute.amazonaws.com:9200/'],send_get_body_as='POST') # DEV
#============================
#============================
# CLUSTER SEARCH
#============================
#============================
def get_cluster_search_mapped(queryTerms, pageNumber=99):
network_information = {
'searchGroupTitle': 'Cluster Network',
'searchTab': 'PATHWAYS',
'diseases': [],
'network': 'clusters_tcga_louvain',
'matchField': 'x_node_list.name',
'matchCoreNode': 'node_name',
'cancerType': 'BRCA',
'queryTerms': queryTerms
}
if(pageNumber == 'undefined'):
pageNumber = 99
star_network_data = get_lazy_search(network_information, pageNumber)
return star_network_data
def get_cluster_search_with_disease_mapped(queryTerms, pageNumber=1, disease=None):
network_information = {
'searchGroupTitle': 'Cluster Network',
'searchTab': 'PATHWAYS',
'diseases': [],
'network': 'clusters_tcga_louvain',
'matchField': 'x_node_list.name',
'matchCoreNode': 'node_name',
'cancerType': 'BRCA',
'queryTerms': queryTerms
}
print disease
star_network_data = get_lazy_search(network_information, pageNumber, disease)
return star_network_data
def get_lazy_search(network_info, pageNumber=1, disease=[]):
computed_hash = util.compute_query_list_hash(network_info['queryTerms'])
#print computed_hash
if(pageNumber != 99):
from_page = (int(pageNumber) - 1) * 10
if(from_page < 0):
from_page = 0
else:
from_page = 0
client = pymongo.MongoClient()
db = client.cache
cluster_search = db.cluster_search
cluster_search_found = cluster_search.find_one({'searchId': computed_hash, 'disease_type': disease})
cached_hits = []
hit_ids = ''
hit_ids_with_disease = []
drugable_genes = ['GABRA1','GABRA2','GABRA3','GABRA4','GABRA5','GABRA6','CHRNA4','CHRNA7','GRIA2','GRIK2']
found_drugable_genes = []
top_overlap_documents = []
disease_list = []
hit_id_node_list = []
unsorted_items = []
annotation_array = []
annotation_dictionary = []
cluster_network_data = {
'searchGroupTitle': network_info['searchGroupTitle'],
'clusterNodeName': "",
'searchTab': network_info['searchTab'],
'diseases': [],
'items': [],
'grouped_items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1',
'annotation_filter_all': [],
'matrix_filter_all': [],
'disease_filter_all': [],
'hit_ids': '',
'hit_ids_with_disease': [],
'page_number': pageNumber
}
cluster_algorithm_hash = {
'clusters_tcga_oslom': 'OSLM',
'clusters_geo_oslom': 'OSLM',
'clusters_tcga_louvain': 'LOUV',
'clusters_tcga_ivanovska': 'IVAN',
'clusters_geo_louvain': 'LOUV'
}
other_cluster_network_data = {
'searchGroupTitle': network_info['searchGroupTitle'],
'clusterNodeName': "",
'searchTab': 'OTHER_CLUSTERS',
'items': [],
'grouped_items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1'
}
other_annotation_dictionary = [{'group_title': 'Other', 'group_members': [], 'topGOId': 'NOGO', 'groupTopQValue': 0, 'topOverlap': 0, 'sort_order': 999, 'group_q_val_total': 0}] #
queryTermArray = network_info['queryTerms'].split(',')
sorted_query_list = [] #PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
cluster_network_data['geneSuperList'] = '' #get_geneSuperList(queryTermArray, sorted_query_list)
network_info['queryTerms'] = network_info['queryTerms'].replace(",", "*")
if(cluster_search_found is not None):
cached_hits = cluster_search_found['cached_hits']
top_overlap_documents = cluster_search_found['top_overlap_documents']
else:
sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
cluster_network_data['geneSuperList'] = get_geneSuperList(queryTermArray, sorted_query_list)
search_body = get_lazy_searchBody(queryTermArray, network_info, disease, sorted_query_list, False, 1) #pageNumber)
result = es.search(
index = 'clusters',
#doc_type = 'clusters_tcga_louvain', clusters_geo_oslom,
doc_type = ['clusters_geo_oslom', 'clusters_tcga_oslom'],
body = search_body
)
# s = Search(using=client, index="my-index") \
# .filter("term", network_name="uveal") \
# .query("match", title="python") \
# .query(~Q("match", description="beta"))
# response = s.execute()
tr = TermAnalyzer()
lucene_score_array = []
for hit in result['hits']['hits']:
lucene_score_array.append(hit['_score'])
lucene_score_max = max(lucene_score_array)
lucene_score_min = min(lucene_score_array)
for hit in result['hits']['hits']:
#hit_ids += hit['_id'] + ','
network_name_s = ''
gamma_s = ''
source_s = ''
network_type_s = ''
node_name_s = ''
q_value_i = ''
go_term_id = ''
gse_number = ''
#=============================================
# Because we are using a "Fields" search in
# ElasticSearch some of the fields may not
# exist in each search hit. Therefore we need
# to check each field first
#=============================================
if('network_name' in hit["fields"]):
if(len(hit["fields"]["network_name"]) > 0):
network_name_s = hit["fields"]["network_name"][0]
if('network_full_name' in hit["fields"]):
if(len(hit["fields"]["network_full_name"]) > 0):
network_full_name_s = hit["fields"]["network_full_name"][0]
#if(network_full_name_s not in disease_list):
disease_list.append(network_full_name_s)
if('gse_number' in hit["fields"]):
if(len(hit["fields"]["gse_number"]) > 0):
gse_number = hit["fields"]["gse_number"][0]
if('gamma' in hit["fields"]):
if(len(hit["fields"]["gamma"]) > 0):
gamma_s = hit["fields"]["gamma"][0]
if('source' in hit["fields"]):
if(len(hit["fields"]["source"]) > 0):
source_s = hit["fields"]["source"][0]
if('network_type' in hit["fields"]):
if(len(hit["fields"]["network_type"]) > 0):
network_type_s = hit["fields"]["network_type"][0]
if('node_name' in hit["fields"]):
if(len(hit["fields"]["node_name"]) > 0):
node_name_s = hit["fields"]["node_name"][0]
#========================================
# Get the top annotation from each hit.
# May not exist for all hits
#========================================
if('hypergeometric_scores.qvalueLog' in hit['fields']):
if(len(hit['fields']['hypergeometric_scores.qvalueLog']) > 0):
q_value_i = hit['fields']['hypergeometric_scores.qvalueLog'][0]
if('hypergeometric_scores.GO_id' in hit['fields']):
if(len(hit['fields']['hypergeometric_scores.GO_id']) > 0):
go_term_id = hit['fields']['hypergeometric_scores.GO_id'][0]
if('hypergeometric_scores.name' in hit['fields']):
if(len(hit['fields']['hypergeometric_scores.name']) > 0):
if(hit['fields']['hypergeometric_scores.name'][0] not in annotation_array):
annotation_array.append(hit['fields']['hypergeometric_scores.name'][0])
annotation_dictionary.append({'group_title': hit['fields']['hypergeometric_scores.name'][0], 'topGOId': go_term_id, 'groupTopQValue': 0, 'topOverlap': 0, 'group_members': [], 'sort_order': 1, 'group_q_val_total': 0})
all_geneNeighborhoodArray = []
all_geneNeighborhoodStringList = ""
#========================
# Get term overlap
#========================
#Get All overlap
#print queryTermArray
all_nodes_array = []
for genehit in queryTermArray:
if(genehit in hit["fields"]["x_node_list.name"] or genehit in hit["fields"]["y_node_list.name"]):
all_geneNeighborhoodArray.append(genehit)
all_geneNeighborhoodStringList += genehit + ','
if(len(all_geneNeighborhoodStringList) > 0):
all_geneNeighborhoodStringList = all_geneNeighborhoodStringList[:-1]
#'emphasizeInfoArray': all_geneNeighborhoodArray,
#print hit['fields']
normalized_score = ((hit['_score'] - lucene_score_min)/(lucene_score_max - lucene_score_min)) * 10.0
if(normalized_score < 0.7):
normalized_score = 0.7
if('hypergeometric_scores.qvalueLog' in hit["fields"]):
cached_hits.append(
{
'_index': hit['_index'],
'_type': hit['_type'],
'_id': hit['_id'],
'_score': normalized_score,
'overlap_array': all_geneNeighborhoodArray,
'overlap_string': all_geneNeighborhoodStringList,
'y_count': str(len(hit["fields"]["y_node_list.name"])),
'x_count': str(len(hit["fields"]["x_node_list.name"])),
'fields': {
'hypergeometric_scores_qvalueLog': hit['fields']['hypergeometric_scores.qvalueLog'],
'hypergeometric_scores_name': hit['fields']['hypergeometric_scores.name'],
'gamma': hit['fields']['gamma'],
'source': hit['fields']['source'],
'hypergeometric_scores_GO_id': hit['fields']['hypergeometric_scores.GO_id'],
'network_name': hit['fields']['network_name'],
'network_full_name': hit['fields']['network_full_name'],
'gse_number': hit['fields']['gse_number'],
'network_type': hit['fields']['network_type']
}
}
)
else:
cached_hits.append(
{
'_index': hit['_index'],
'_type': hit['_type'],
'_id': hit['_id'],
'_score': (0.01 * len(all_geneNeighborhoodArray)),#normalized_score,
'overlap_array': all_geneNeighborhoodArray,
'overlap_string': all_geneNeighborhoodStringList,
'y_count': str(len(hit["fields"]["y_node_list.name"])),
'x_count': str(len(hit["fields"]["x_node_list.name"])),
'fields': {
'gamma': hit['fields']['gamma'],
'source': hit['fields']['source'],
'network_name': hit['fields']['network_name'],
'network_full_name': hit['fields']['network_full_name'],
'gse_number': hit['fields']['gse_number'],
'network_type': hit['fields']['network_type']
}
}
)
cached_hits = sorted(cached_hits, key=lambda k: k['_score'], reverse=True)
cluster_size_limit = 150
while((len(top_overlap_documents) < 1) and (cluster_size_limit < 1000)):
for all_hit in cached_hits:
network_full_name_s = 'none'
if('network_full_name' in all_hit["fields"]):
if(len(all_hit["fields"]["network_full_name"]) > 0):
network_full_name_s = all_hit["fields"]["network_full_name"][0]
#==================================================
# This section is temporary until we have a better
# way of ranking clusters for drug inference
#==================================================
if(int(all_hit['x_count']) < cluster_size_limit and int(all_hit['y_count']) < cluster_size_limit):
top_overlap_documents.append(
{
'doc_id': all_hit['_id'],
'doc_disease': network_full_name_s,
'doc_overlap': len(all_hit['overlap_array'])
}
)
cluster_size_limit += 50
client = pymongo.MongoClient()
db = client.cache
cluster_search = db.cluster_search
cluster_search.save(
{
'searchId': computed_hash,
'disease_type': disease,
'cached_hits': cached_hits,
'top_overlap_documents': top_overlap_documents
}
)
client.close()
#============================
# Compile the filter labels
#============================
annotation_filter_all = []
annotation_filter_all_go_id = []
matrix_filter_all = []
disease_filter_all = []
for hit in cached_hits:
q_log_value = 0.0
if('hypergeometric_scores_qvalueLog' in hit['fields']):
if(len(hit['fields']['hypergeometric_scores_qvalueLog']) > 0):
q_log_value = hit['fields']['hypergeometric_scores_qvalueLog'][0]
else:
#=============================================
# If no qvaluelog then this is a
# non-annotated cluster. for filter
# purposes we give it a passing filter value
#=============================================
q_log_value = 99
if(q_log_value >= 0.5):
if('network_type' in hit["fields"]):
if(len(hit["fields"]["network_type"]) > 0):
if(hit["fields"]["network_type"][0].replace('_', ' ') not in matrix_filter_all):
matrix_filter_all.append(hit["fields"]["network_type"][0].replace('_', ' '))
if('network_full_name' in hit["fields"]):
if(len(hit["fields"]["network_full_name"]) > 0):
if(hit["fields"]["network_full_name"][0] not in disease_filter_all):
disease_filter_all.append(hit["fields"]["network_full_name"][0])
if('hypergeometric_scores_name' in hit['fields']):
if(len(hit['fields']['hypergeometric_scores_name']) > 0):
if(hit["fields"]["hypergeometric_scores_name"][0] not in annotation_filter_all):
annotation_filter_all.append(hit["fields"]["hypergeometric_scores_name"][0])
annotation_filter_all_go_id.append(hit["fields"]["hypergeometric_scores_GO_id"][0])
else:
if('Other' not in annotation_filter_all):
annotation_filter_all.append('Other')
if(pageNumber != '99'):
search_these_cached_hits = cached_hits[from_page:from_page + 10]
else:
search_these_cached_hits = cached_hits
for hit in search_these_cached_hits: #result['hits']['hits']:
#hit_ids += hit['_id'] + ','
network_name_s = ''
gamma_s = ''
source_s = ''
network_type_s = ''
node_name_s = ''
q_value_i = ''
go_term_id = ''
gse_number = ''
#=============================================
# Because we are using a "Fields" search in
# ElasticSearch some of the fields may not
# exist in each search hit. Therefore we need
# to check and initialize those fields
#=============================================
if('network_name' in hit["fields"]):
if(len(hit["fields"]["network_name"]) > 0):
network_name_s = hit["fields"]["network_name"][0]
if('network_full_name' in hit["fields"]):
if(len(hit["fields"]["network_full_name"]) > 0):
network_full_name_s = hit["fields"]["network_full_name"][0]
#if(network_full_name_s not in disease_list):
disease_list.append(network_full_name_s)
if('gse_number' in hit["fields"]):
if(len(hit["fields"]["gse_number"]) > 0):
gse_number = hit["fields"]["gse_number"][0]
if('gamma' in hit["fields"]):
if(len(hit["fields"]["gamma"]) > 0):
gamma_s = hit["fields"]["gamma"][0]
if('source' in hit["fields"]):
if(len(hit["fields"]["source"]) > 0):
source_s = hit["fields"]["source"][0]
if('network_type' in hit["fields"]):
if(len(hit["fields"]["network_type"]) > 0):
network_type_s = hit["fields"]["network_type"][0]
if('node_name' in hit["fields"]):
if(len(hit["fields"]["node_name"]) > 0):
node_name_s = hit["fields"]["node_name"][0]
#for x_genehit in hit["fields"]["x_node_list.name"]:
# if(x_genehit in drugable_genes):
# if(x_genehit not in found_drugable_genes):
# found_drugable_genes.append(x_genehit)
#for y_genehit in hit["fields"]["y_node_list.name"]:
# if(y_genehit in drugable_genes):
# if(y_genehit not in found_drugable_genes):
# found_drugable_genes.append(y_genehit)
all_geneNeighborhoodArray = hit['overlap_array'] #[]
all_geneNeighborhoodStringList = hit['overlap_string'] #""
#========================
# Get term overlap
#========================
#Get All overlap
all_nodes_array = []
#for genehit in queryTermArray:
# if(genehit in hit["fields"]["x_node_list.name"] or genehit in hit["fields"]["y_node_list.name"]):
# all_geneNeighborhoodArray.append(genehit)
# all_geneNeighborhoodStringList += genehit + ','
#if(len(all_geneNeighborhoodStringList) > 0):
# all_geneNeighborhoodStringList = all_geneNeighborhoodStringList[:-1]
#for genehit in hit["fields"]["x_node_list.name"]:
# all_nodes_array.append(genehit)
#for genehit in hit["fields"]["y_node_list.name"]:
# if(genehit not in all_nodes_array):
# all_nodes_array.append(genehit)
#========================
# END Get term overlap
#========================
#========================================
# Get the top annotation from each hit.
# May not exist for all hits
#========================================
if('hypergeometric_scores_qvalueLog' in hit['fields']):
if(len(hit['fields']['hypergeometric_scores_qvalueLog']) > 0):
q_value_i = hit['fields']['hypergeometric_scores_qvalueLog'][0]
if('hypergeometric_scores_GO_id' in hit['fields']):
if(len(hit['fields']['hypergeometric_scores_GO_id']) > 0):
go_term_id = hit['fields']['hypergeometric_scores_GO_id'][0]
if('hypergeometric_scores_name' in hit['fields']):
if(len(hit['fields']['hypergeometric_scores_name']) > 0):
if(hit['fields']['hypergeometric_scores_name'][0] not in annotation_array):
annotation_array.append(hit['fields']['hypergeometric_scores_name'][0])
annotation_dictionary.append({'group_title': hit['fields']['hypergeometric_scores_name'][0], 'topGOId': go_term_id, 'groupTopQValue': 0, 'topOverlap': 0, 'group_members': [], 'sort_order': 1, 'group_q_val_total': 0})
#====================================
# create a list of nodes in this
# cluster to be used in dup compare
#====================================
hit_id_node_list.append(
{
'hit_id': hit['_id'],
'top_Q': q_value_i,
'query_nodes': [] #sorted(all_nodes_array)
}
)
gene_network_data_items = {
'md5hash': 'hash code place holder',
'searchResultTitle': node_name_s.replace('-','') + ' ' + network_name_s + ' ' + network_type_s + ' gamma:' + gamma_s + ' Q value: ' + str(q_value_i), #+ ' ' + searchResultTitle,
'QValue': str(q_value_i),
'Gamma': gamma_s,
'Source': source_s,
'hit_id': hit['_id'],
'diseaseType': network_full_name_s, #tr.get_cancer_description_by_id(network_name_s).replace(',',' '),
'gse_number': gse_number,
'dataSetType': network_type_s.replace('_', ' '),
'clusterName': node_name_s.replace('-',''),
'hypergeometricScores': [],
'searchResultScoreRank': hit["_score"],
'cluster_algorithm': cluster_algorithm_hash[hit["_type"]],
'luceneScore': hit["_score"],
'topQValue': q_value_i,
'topOverlap': len(all_geneNeighborhoodArray),
'y_count': hit['y_count'], #str(len(hit["fields"]["y_node_list.name"])),
'x_count': hit['x_count'], #str(len(hit["fields"]["x_node_list.name"])),
'searchResultScoreRankTitle': 'pubmed references ',
#'filterValue': str(len(hit["fields"]["y_node_list.name"])) + ' x ' + str(len(hit["fields"]["x_node_list.name"])),
'emphasizeInfoArray': all_geneNeighborhoodArray,
'overlapList': all_geneNeighborhoodStringList,
'x_emphasizeInfoArrayWithWeights': [],
'y_emphasizeInfoArrayWithWeights': [],
'pubmedCount': 0
}
#==================================================
# HEAT PROP
#==================================================
#cluster_x_y_z = SearchViz.load_x_y_z_cluster_data(hit['_id'])
#SearchViz.get_heat_prop_from_gene_list_by_cluster_source(all_geneNeighborhoodStringList,cluster_x_y_z)
#==================================================
# This section is temporary until we have a better
# way of ranking clusters for drug inference
#==================================================
# if(int(hit['x_count']) <= 200 and int(hit['y_count']) <= 200):
# top_overlap_documents.append(
# {
# 'doc_id': hit['_id'],
# 'doc_disease': network_full_name_s,
# 'doc_overlap': len(all_geneNeighborhoodArray)
# }
# )
#==================================================
# END temporary section
#==================================================
unsorted_items.append(gene_network_data_items)
annotation_found = False
for ann_dict_item in annotation_dictionary:
if('hypergeometric_scores_name' in hit['fields']):
if(len(hit['fields']['hypergeometric_scores_name']) > 0):
if(ann_dict_item['group_title'] == hit['fields']['hypergeometric_scores_name'][0]):
ann_dict_item['group_q_val_total'] += gene_network_data_items['topQValue']
ann_dict_item['group_members'].append(gene_network_data_items)
# cumulate the q value scores
annotation_found = True
if(not annotation_found):
for ann_dict_item in other_annotation_dictionary:
if(ann_dict_item['group_title'] == 'Other'):
ann_dict_item['group_members'].append(gene_network_data_items)
#print 'Process hits: ' + str(start_time - time.time())
#start_time = time.time()
#=================================================
# Calculate the average Q value for each group
# Also determine the highest Q value in the group
#=================================================
master_group_dups = []
master_group_dups_membership = []
for ann_dict_item in annotation_dictionary:
if(len(ann_dict_item['group_members']) > 0):
average_q_value = float(ann_dict_item['group_q_val_total']) / float(len(ann_dict_item['group_members']))
ann_dict_item['average_q_value'] = average_q_value
group_top_q_value_array = []
group_top_overlap_array = []
for group_item in ann_dict_item['group_members']:
group_top_q_value_array.append(group_item['topQValue'])
group_top_overlap_array.append(len(group_item['emphasizeInfoArray']))
#==============================================
# Find cluster dups DISABLED (see line below)
#==============================================
if(len(group_top_q_value_array) > 0 and ann_dict_item['group_title'] != 'Other'):
ann_dict_item['groupTopQValue'] = max(group_top_q_value_array)
dup_ids = [] #DISABLED determine_cluster_duplicates(group_top_q_value_array, hit_id_node_list)
if(len(dup_ids) > 0):
for dup in dup_ids:
if dup not in master_group_dups_membership:
master_group_dups_membership.append(dup)
ann_dict_item['topOverlap'] = max(group_top_overlap_array)
elif(ann_dict_item['group_title'] == 'Other'):
ann_dict_item['topOverlap'] = max(group_top_overlap_array)
else:
ann_dict_item['average_q_value'] = 0
#print master_group_dups_membership
#print 'Average Q: ' + str(start_time - time.time())
#start_time = time.time()
top_documents_sorted_list = sorted(top_overlap_documents, key=lambda k: k['doc_overlap'])
#print dumps(top_documents_sorted_list[-3:])
for top_docs in reversed(top_documents_sorted_list[-8:]):
hit_ids += top_docs['doc_id'] + ','
hit_ids_with_disease.append(
{
'id': top_docs['doc_id'],
'disease': top_docs['doc_disease']
}
)
if(len(hit_ids) > 0):
hit_ids = hit_ids[:-1]
#start_thumbnail_generator(hit_ids)
cluster_network_data['hit_ids'] = hit_ids
cluster_network_data['hit_ids_with_disease'] = hit_ids_with_disease
for ann_dict_item in annotation_dictionary:
replace_group_with_this = []
for group_item in ann_dict_item['group_members']:
if(group_item['hit_id'] in master_group_dups_membership):
#ann_dict_item['group_members'].remove(group_item)
group_item['Source'] += '_dup'
else:
replace_group_with_this.append(group_item)
ann_dict_item['group_members'] = replace_group_with_this
cluster_network_data['grouped_items'] = annotation_dictionary
return_disease_list = []
disease_list_grouped = [list(j) for i, j in groupby(sorted(disease_list))]
for disease_group in disease_list_grouped:
return_disease_list.append(
{
'disease_title': disease_group[0],
'count': len(disease_group)
}
)
cluster_network_data['diseases'] = return_disease_list
#print 'Other: ' + str(start_time - time.time())
#start_time = time.time()
if(len(other_annotation_dictionary) > 0):
cluster_network_data['grouped_items'].append(other_annotation_dictionary[0])
cluster_network_data['annotation_filter_all'] = annotation_filter_all
cluster_network_data['annotation_filter_all_go_id'] = annotation_filter_all_go_id
cluster_network_data['matrix_filter_all'] = matrix_filter_all
cluster_network_data['disease_filter_all'] = disease_filter_all
other_cluster_network_data['grouped_items'] = other_annotation_dictionary
#print 'Everything else: ' + str(start_time - time.time())
#print found_drugable_genes
return [cluster_network_data] #, other_cluster_network_data]
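# Small sketch of the Lucene score normalization used inside get_lazy_search:
# raw _score values are min-max scaled onto a 0-10 range with a floor of 0.7
# so weak hits remain visible. The guard for a degenerate single-score list is
# added here and is not part of the original code; illustrative only.
def _example_normalize_scores(raw_scores):
    hi, lo = max(raw_scores), min(raw_scores)
    normalized = []
    for score in raw_scores:
        value = (float(score - lo) / (hi - lo)) * 10.0 if hi != lo else 0.7
        normalized.append(max(value, 0.7))
    return normalized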
#========================================================================
# If 2 or more clusters share 95% of the same nodes we will flag them as
# duplicates. In a group of duplicates we will keep the first one and
# recommend removing the others
#========================================================================
def determine_cluster_duplicates(group_item_ids, group_items):
return_ids = []
simularity_array = [list(j) for i, j in groupby(sorted(group_item_ids))]
for simular_items in simularity_array:
group_dup_ids = []
if(len(simular_items) > 1):
compare_these_objs = []
last_list = []
last_id = ''
first_id = None
for compare_this in group_items:
if(compare_this['top_Q'] == simular_items[0]):
if first_id is None:
first_id = compare_this['hit_id']
#print compare_this['hit_id'] + ' query nodes size: ' + str(len(compare_this['query_nodes']))
this_list = compare_this['query_nodes']
this_id = compare_this['hit_id']
if(len(last_list) > 0):
sm=difflib.SequenceMatcher(None,last_list,this_list)
if(sm.ratio() > 0.95):
if(last_id not in return_ids):
group_dup_ids.append(last_id)
#print last_id
if(this_id not in return_ids):
group_dup_ids.append(this_id)
#print this_id
last_list = this_list
last_id = compare_this['hit_id']
for dup_item in group_dup_ids[1:]:
return_ids.append(dup_item)
#print simular_items[0]
return return_ids
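# Minimal sketch of the similarity test used in determine_cluster_duplicates:
# two sorted node lists are compared with difflib.SequenceMatcher and flagged
# as duplicates when the ratio exceeds 0.95. The gene lists passed in would be
# hypothetical; relies on the difflib import at the top of this module.
def _example_cluster_similarity(nodes_a, nodes_b, threshold=0.95):
    ratio = difflib.SequenceMatcher(None, sorted(nodes_a), sorted(nodes_b)).ratio()
    return ratio > threshold, ratio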
def load_x_y_z_cluster_data(result_source):
#result_source = results['hits']['hits'][0]['_source']
xValues = []
yValues = []
result = result_source
for x_label in result['x_node_list']:
xValues.append(x_label['name'])
for y_label in result['y_node_list']:
yValues.append(y_label['name'])
data = []
for correlation_record in result['correlation_matrix']:
correlation_value = correlation_record['correlation_value']
data.append([correlation_record['x_loc'], correlation_record['y_loc'], correlation_value])
array = np.zeros((len(result['x_node_list']), len(result['y_node_list'])))
for row, col, val in data:
index = (row, col)
array[index] = val
zValues = array
sample_mat = pd.DataFrame(data=zValues, # values
index=yValues, # 1st column as index
columns=xValues) # 1st row as the column names
idx_to_node = dict(zip(range(len(sample_mat)),list(sample_mat.index)))
sample_mat = np.array(sample_mat)
sample_mat = sample_mat[::-1,::-1] # reverse the indices for use in graph creation
G_cluster = nx.Graph()
G_cluster = nx.from_numpy_matrix(np.abs(sample_mat))
G_cluster = nx.relabel_nodes(G_cluster,idx_to_node)
return G_cluster
def cluster_search_mapped(network_info, disease=[]):
gene_network_data = {
'searchGroupTitle': network_info['searchGroupTitle'],
'clusterNodeName': "",
'searchTab': network_info['searchTab'],
'items': [],
'grouped_items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1'
}
unsorted_items = []
gene_super_list = []
annotation_array = []
annotation_dictionary = [{'group_title': 'Other', 'group_members': [], 'sort_order': 999}]
queryTermArray = network_info['queryTerms'].split(',')
sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
gene_network_data['geneSuperList'] = get_geneSuperList(queryTermArray, sorted_query_list)
network_info['queryTerms'] = network_info['queryTerms'].replace(",", "*")
search_body = get_searchBody(queryTermArray, network_info, disease, sorted_query_list, False)
result = es.search(
#index = 'network',
#doc_type = 'louvain_cluster',
index = 'clusters',
doc_type = 'clusters_tcga_louvain',
#doc_type = 'clusters_tcga_louvain, clusters_tcga_oslom',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
#==================================
# PROCESS EACH SEARCH RESULT
#==================================
hitCount = 0
hitMax = 0
hitMin = 0
if(result['hits']['total'] < 1):
print 'no results'
tr = TermAnalyzer()
for hit in result['hits']['hits']:
if(len(hit['_source']['hypergeometric_scores']) > 0):
if(hit['_source']['hypergeometric_scores'][0]['name'] not in annotation_array):
annotation_array.append(hit['_source']['hypergeometric_scores'][0]['name'])
annotation_dictionary.append({'group_title': hit['_source']['hypergeometric_scores'][0]['name'], 'group_members': [], 'sort_order': 1})
if(hitCount == 0):
hitMax = hit['_score']
else:
hitMin = hit['_score']
all_geneNeighborhoodArray = []
#========================
# Get term overlap
#========================
#Get All overlap
for genehit in hit["_source"]["query_node_list"]:
all_geneNeighborhoodArray.append(genehit['name'])
all_all = [set(all_geneNeighborhoodArray), set(queryTermArray)]
all_intersect = set.intersection(*all_all)
#========================
# END Get term overlap
#========================
searchResultSummaryString = hit["_source"]["source"] #+ '- [hypergeometric scores coming soon]' #+ hit["_source"]["hypergeometric_scores"]
searchResultTitle = ''
hg_q_log_val = 5.0
for hg in hit['_source']['hypergeometric_scores']:
if(hg['qvalueLog'] >= hg_q_log_val):
hg_q_log_val = hg['qvalueLog']
searchResultTitle = hg['name']
hit_score = float(hit["_score"])
#=================================================
# Compute a hash code to identify if 2 (or more)
# clusters are the same
#=================================================
hash_array = []
h = hashlib.new('ripemd160')
h.update(hit["_source"]["network_name"] + hit["_source"]["network_type"] + str(len(hit["_source"]["y_node_list"])) + 'x' + str(len(hit["_source"]["x_node_list"])))
all_geneNeighborhoodString = ''.join(sorted(all_geneNeighborhoodArray))
h.update(all_geneNeighborhoodString)
computed_hash = h.hexdigest()
#=================================================
# END HASH COMPUTE
#=================================================
hash_array.append(computed_hash)
gene_network_data_items = {
'md5hash': computed_hash,
'searchResultTitle': hit["_source"]["node_name"].replace('-','') + ' ' + hit["_source"]["network_name"] + ' ' + hit["_source"]["network_type"] + ' gamma:' + hit["_source"]["gamma"], #+ ' ' + searchResultTitle,
'hit_id': hit['_id'],
'diseaseType': tr.get_cancer_description_by_id(hit["_source"]["network_name"]).replace(',',' '),
'dataSetType': hit["_source"]["network_type"].replace('_', ' '),
'clusterName': hit["_source"]["node_name"].replace('-',''),
'searchResultSummary': searchResultSummaryString,
'hypergeometricScores': hit['_source']['hypergeometric_scores'],
'searchResultScoreRank': hit["_score"],
'luceneScore': hit["_score"],
'searchResultScoreRankTitle': 'pubmed references ',
'filterValue': str(len(hit["_source"]["y_node_list"])) + ' x ' + str(len(hit["_source"]["x_node_list"])),
'emphasizeInfoArray': set(all_intersect),
'x_emphasizeInfoArrayWithWeights': [],
'y_emphasizeInfoArrayWithWeights': [],
'top5': hitCount < 5,
'hitOrder': hitCount,
'pubmedCount': 0
}
unsorted_items.append(gene_network_data_items)
gene_network_data['items'].append(gene_network_data_items)
annotation_found = False
for ann_dict_item in annotation_dictionary:
if(len(hit['_source']['hypergeometric_scores']) > 0):
if(ann_dict_item['group_title'] == hit['_source']['hypergeometric_scores'][0]['name']):
ann_dict_item['group_members'].append(gene_network_data_items) #hit['_id']) #hit["_source"]["node_name"].replace('-','') + ' ' + hit["_source"]["network_name"] + ' ' + hit["_source"]["network_type"] + ' gamma:' + hit["_source"]["gamma"] + ' ' + searchResultTitle)
annotation_found = True
if(not annotation_found):
for ann_dict_item in annotation_dictionary:
if(ann_dict_item['group_title'] == 'Other'):
ann_dict_item['group_members'].append(gene_network_data_items) #hit['_id']) #hit["_source"]["node_name"].replace('-','') + ' ' + hit["_source"]["network_name"] + ' ' + hit["_source"]["network_type"] + ' gamma:' + hit["_source"]["gamma"] + ' ' + searchResultTitle)
hitCount += 1
print hitCount
#for network_data_item in unsorted_items:
gene_network_data['grouped_items'] = annotation_dictionary
# foundHit = False
# for network_data_item in unsorted_items:
# foundHit = False
# for sortedID in sorted_query_list['results']:
# if sortedID['id'] == network_data_item['clusterName']:
# network_data_item['pubmedCount'] = sortedID['count']
# network_data_item['searchResultScoreRank'] = sortedID['normalizedValue']
# gene_network_data['items'].append(network_data_item)
# foundHit = True
# if(not foundHit):
# network_data_item['pubmedCount'] = 0
# network_data_item['searchResultScoreRank'] = 0
# gene_network_data['items'].append(network_data_item)
counter_gene_list = Counter(gene_super_list)
for key, value in counter_gene_list.iteritems():
kv_item = {'queryTerm': key,
'boostValue': value}
#gene_network_data['geneSuperList'].append(kv_item)
return gene_network_data
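# Ad-hoc helper: looks up a single cluster document by Elasticsearch id, rebuilds its
# correlation matrix as a weighted NetworkX graph and prints the neighbors of each query
# term, so term/cluster overlap can be inspected from the console.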
def get_document_overlaps(queryTerms, elasticId):
queryTermArray = queryTerms.split(',')
search_body = {
'query': {
'bool': {
'must': [
{'match': {
'_id': elasticId
}}
]
}
},
'size': 1
}
results = es.search(
index = 'clusters',
body = search_body
)
if(len(results) > 0):
result = results['hits']['hits'][0]['_source']
data = {
'corr': [],
'group_id': [],
'p': [],
'var1': [],
'var2': []
}
print 'got results'
main_data_tuples = []
#===================================
# GENERATE NODES & EDGES DATAFRAME
# BASED ON EDGE CUT_OFF
#===================================
for correlation_record in result['correlation_matrix']:
data['corr'].append(correlation_record['correlation_value'])
data['group_id'].append(elasticId)
data['p'].append(correlation_record['p_value'])
data['var1'].append(result['x_node_list'][correlation_record['x_loc']]['name'])
data['var2'].append(result['y_node_list'][correlation_record['y_loc']]['name'])
main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'], result['y_node_list'][correlation_record['y_loc']]['name'], correlation_record['correlation_value']))
#print 'x1'
#print main_data_tuples
# edge_list_1 = [(data['var1'], data['var2'], data['corr'])]
# Gsmall_1 = nx.Graph()
#print 'x2'
# Gsmall_1.add_weighted_edges_from(edge_list_1)
#print 'x3'
# for query_term in queryTermArray:
# print Gsmall_1.neighbors(query_term)
#print 'edge list created'
#edge_list_df = pd.DataFrame(data, columns=['corr', 'group_id','p','var1','var2'])
#print 'data frame created'
Gsmall = nx.Graph()
#print 'edge list created 4'
Gsmall.add_weighted_edges_from(main_data_tuples)
#print 'edges added'
for query_term in queryTermArray:
print Gsmall.neighbors(query_term)
#group_ids = np.unique(edge_list_df['group_id'])
#for focal_group in group_ids:
# idx_group = (edge_list_df['group_id']==focal_group)
# idx_group = list(edge_list_df['group_id'][idx_group].index)
# make a network out of it
# print 'edge list created 2'
# edge_list = [(edge_list_df['var1'][i], edge_list_df['var2'][i], edge_list_df['corr'][i]) for i in idx_group if edge_list_df['corr'][i] !=0]
# print 'edge list created 3'
# Gsmall = nx.Graph()
# print 'edge list created 4'
# Gsmall.add_weighted_edges_from(edge_list)
# print 'edges added'
# for query_term in queryTermArray:
# print Gsmall.neighbors(query_term)
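# Builds an Elasticsearch query body for cluster search. Each query term becomes a boosted
# 'should' match (boost floor 0.0001) against node_name / node_list.name for star searches,
# or against x_node_list.name / y_node_list.name otherwise. A disease filter is assembled in
# must_match but is currently left commented out of the returned body.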
def get_searchBody(queryTermArray, network_info, disease, sorted_query_list, isStarSearch):
should_match = []
must_match = []
returnBody = {}
#sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
for queryTerm in queryTermArray:
boost_value_append = get_boost_value(sorted_query_list['results'], queryTerm)
if(boost_value_append < 0.0001):
boost_value_append = 0.0001
if(isStarSearch):
should_match.append({"match": {"node_list.name":{"query": queryTerm,"boost": boost_value_append}}})
should_match.append( { 'match': {'node_name': queryTerm} })
#should_match.append( { 'match': {'node_list.node.name': queryTerm} })
else:
#should_match.append({"match": {"query_node_list.name":{"query": queryTerm,"boost": boost_value_append}}})
should_match.append({"match": {"x_node_list.name":{"query": queryTerm,"boost": boost_value_append}}})
should_match.append({"match": {"y_node_list.name":{"query": queryTerm,"boost": boost_value_append}}})
if len(disease) > 0:
diseaseWithSpaces = '';
for addThisDisease in disease:
if len(diseaseWithSpaces) < 1:
diseaseWithSpaces = addThisDisease
else:
diseaseWithSpaces = diseaseWithSpaces + ' ' + addThisDisease
must_match.append({'match': {'network_name': diseaseWithSpaces}})
else:
must_match.append({"match": {"network_name": "LAML ACC BLCA LGG BRCA CESC CHOL COAD ESCA FPPP GBM HNSC KICH KIRC KIRP LIHC LUAD LUSC DLBC MESO OV PAAD PCPG PRAD READ SARC SKCM STAD TGCT THYM THCA UCS UCEC UVM"}})
# REMOVING disease matching until we get that information back in the documents
if(isStarSearch):
returnBody = {
# 'sort' : [
# '_score'
# ],
'query': {
'bool': {
#'must': must_match,
'should': should_match
}
},
'size': 15
}
else:
returnBody = {
# 'sort' : [
# '_score'
# ],
'query': {
'bool': {
#'must': must_match,
'should': should_match
}
},
'size': 12
}
return returnBody
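# Paged variant of get_searchBody (40 hits per page). Query terms are added as constant_score
# term filters on query_node_list.name and hits are re-scored with a field_value_factor
# function on max_annotation_conf; the 'experimental_search_body2' form is what gets returned.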
def get_lazy_searchBody(queryTermArray, network_info, disease, sorted_query_list, isStarSearch, pageNumber=1):
should_match = []
should_match_experiment =[]
must_match_experiment = []
filter_match = []
must_match = []
returnBody = {}
if(pageNumber != 99):
from_page = (int(pageNumber) - 1) * 40
if(from_page < 0):
from_page = 0
else:
from_page = 0
#sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
for queryTerm in queryTermArray:
boost_value_append = get_boost_value(sorted_query_list['results'], queryTerm)
if(boost_value_append < 0.001):
boost_value_append = 0.001
if(isStarSearch):
should_match.append({"match": {"node_list.name":{"query": queryTerm,"boost": boost_value_append}}})
should_match.append( { 'match': {'node_name': queryTerm} })
#should_match.append( { 'match': {'node_list.node.name': queryTerm} })
else:
#should_match_experiment.append({"constant_score": {"query":{"match": {"query_node_list.name":queryTerm}}}})
should_match_experiment.append({"constant_score": {"filter":{"term": {"query_node_list.name":queryTerm}}}})
should_match.append({"match": {"query_node_list.name":{"query": queryTerm,"boost": boost_value_append}}})
filter_match.append(queryTerm)
#should_match.append({"match": {"x_node_list.name":{"query": queryTerm,"boost": boost_value_append}}})
#should_match.append({"match": {"y_node_list.name":{"query": queryTerm,"boost": boost_value_append}}})
if len(disease) > 0:
diseaseWithSpaces = '';
for addThisDisease in disease:
must_match_experiment.append({"constant_score": {"filter":{"term": {"network_name":addThisDisease.lower()}}}})
if len(diseaseWithSpaces) < 1:
diseaseWithSpaces = addThisDisease
else:
diseaseWithSpaces = diseaseWithSpaces + ' ' + addThisDisease
must_match.append({'match': {'network_name': diseaseWithSpaces}})
print 'Filter disease: '
print disease
else:
must_match.append({"match": {"network_name": "LAML ACC BLCA LGG BRCA CESC CHOL COAD ESCA FPPP GBM HNSC KICH KIRC KIRP LIHC LUAD LUSC DLBC MESO OV PAAD PCPG PRAD READ SARC SKCM STAD TGCT THYM THCA UCS UCEC UVM"}})
experimental_search_body2 = {}
if(len(disease) > 0):
experimental_search_body2 = {
'fields': ['network_type', 'network_name', 'network_full_name', 'gse_number', 'gamma', 'hypergeometric_scores.name', 'hypergeometric_scores.qvalueLog', 'hypergeometric_scores.GO_id', 'x_node_list.name', 'y_node_list.name', 'source'],
'size': 40,
'from': from_page,
'query': {
'filtered': {
'filter': {'term': {'network_name': disease}},
'query': {
'function_score': {
'query': {
'bool': {
'should': should_match_experiment
}
},
'field_value_factor': {
'field': 'max_annotation_conf',
'factor': 0.00001,
'modifier': 'log2p',
'missing': 1
},
'score_mode': 'sum',
'boost_mode': 'sum'
}
}
}
}
}
returnBody = {
#'sort': [{'_score': {'order': 'desc'}}, {'hypergeometric_scores.qvalueLog': {'order': 'desc'}}],
'fields': ['network_type', 'network_name', 'network_full_name', 'gse_number', 'gamma', 'hypergeometric_scores.name', 'hypergeometric_scores.qvalueLog', 'hypergeometric_scores.GO_id', 'x_node_list.name', 'y_node_list.name', 'source'],
'filtered':{
'query' : {
'bool': {
'should': should_match
}
},
'filter': must_match
},
'from': from_page,
'size': 10
}
#s = Search.from_dict(experimental_search_body2)
#s.filter('term', network_name='uveal')
#experimental_search_body2 = s.to_dict()
#print dumps(experimental_search_body2)
else:
returnBodyx = {
'fields': ['network_type', 'network_name', 'network_full_name', 'gse_number', 'gamma', 'hypergeometric_scores.name', 'hypergeometric_scores.qvalueLog', 'hypergeometric_scores.GO_id', 'x_node_list.name', 'y_node_list.name', 'source'],
'query': {
'filtered': {
'query': {
'function_score': {
'query': {
'bool': {
'should': should_match_experiment
}
},
'functions': [
{
'script_score': {
'file': 'pathwayFunctions'
}
}
],
'score_mode': 'multiply'
},
'filter': {
'bool': {
'should': [
{
'terms': {
'query_node_list.name': filter_match
}
}
]
}
}
}
}
},
'highlight': {
'pre_tags': [''],
'post_tags': [''],
'fields': {
'query_node_list.name': {}
}
},
'from': from_page,
'size': 10
}
experimental_search_body2 = {
'highlight': {
'pre_tags': [''],
'fields': {
'query_node_list.name': {}
},
'post_tags': ['']
},
'fields': ['network_type', 'network_name', 'network_full_name', 'gse_number', 'gamma', 'hypergeometric_scores.name', 'hypergeometric_scores.qvalueLog', 'hypergeometric_scores.GO_id', 'x_node_list.name', 'y_node_list.name', 'source'],
'size': 40,
'from': from_page,
'query': {
'function_score': {
'query': {
'bool': {
'should': should_match_experiment
}
},
'field_value_factor': {
'field': 'max_annotation_conf',
'factor': 0.00001,
'modifier': 'log2p',
'missing': 1
},
'score_mode': 'sum',
'boost_mode': 'sum'
}
}
}
returnBody = {
#'sort': [{'_score': {'order': 'desc'}}, {'hypergeometric_scores.qvalueLog': {'order': 'desc'}}],
'fields': ['network_type', 'network_name', 'network_full_name', 'gse_number', 'gamma', 'hypergeometric_scores.name', 'hypergeometric_scores.qvalueLog', 'hypergeometric_scores.GO_id', 'x_node_list.name', 'y_node_list.name', 'source'],
'query' : {
'bool': {
'should': should_match
}
},
'from': from_page,
'size': 10
}
return experimental_search_body2 #returnBody
def convert_network_type(network_type):
switcher = {
'mutation_vs_mutation': 'DNA x DNA',
'rnaseq_vs_rnaseq': 'RNA x RNA',
}
return switcher.get(network_type, network_type)
def get_geneSuperList(queryTermArray, sorted_query_list):
returnValue = []
#sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
for queryTerm in queryTermArray:
#should_match.append( { 'match': {network_info['matchField']: queryTerm} })
boost_value_append = get_boost_value(sorted_query_list['results'], queryTerm)
#should_match.append({"match": {"node_list.node.name":{"query": queryTerm,"boost": boost_value_append}}})
returnValue.append({'queryTerm': queryTerm, 'boostValue': boost_value_append})
return returnValue
def get_boost_value(boostArray, idToCheck):
for boostItem in boostArray:
if(boostItem['id'] == idToCheck):
return boostItem['normalizedValue']
return 0.0001
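# Builds the heat-map graph for one cluster. Results are cached in Mongo (cache.heat_map_graph,
# keyed by clusterId); on a miss the cluster is fetched from Elasticsearch, edges are kept above
# the cut-off returned by get_top_200_weight (falling back to 0.5 if nothing survives), and the
# graph is produced by generate_graph_from_tab_file. Query genes missing from the result are
# appended as isolated placeholder nodes.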
def get_heatmap_graph_from_es_by_id(elasticId, gene_list, search_type, cut_off_filter_value):
client = pymongo.MongoClient()
db = client.cache
gene_list_array = gene_list.split(',')
query_list_found = []
heat_map_graph = db.heat_map_graph
heat_map_found = heat_map_graph.find_one({'clusterId': elasticId})
if(heat_map_found is not None):
return heat_map_found['heat_map']
else:
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'must': [
{'match': {
'_id': elasticId
}}
]
}
},
'size': 1
}
results = es.search(
index = 'clusters',
doc_type = search_type,
body = search_body
)
if(len(results['hits']['hits']) > 0):
result = results['hits']['hits'][0]['_source']
calculated_cut_off = 0.5
#if(len(result['correlation_matrix']) > 1000):
# calculated_cut_off = 0.87
#else:
# calculated_cut_off = 0.5 + (0.37 * (len(result['correlation_matrix'])/1000))
cluster_IO = "corr group_id p var1 var2 \n"
x_matrix_width = len(result['x_node_list'])-1
y_matrix_width = len(result['y_node_list'])-1
data = {
'corr': [],
'group_id': [],
'p': [],
'var1': [],
'var2': []
}
calculated_cut_off = get_top_200_weight(result['correlation_matrix'])
for gene_list_item in gene_list_array:
x_node_index = findIndexByKeyValue(result['x_node_list'], 'name', gene_list_item)
if(x_node_index >= 0):
query_list_found.append(gene_list_item)
else:
y_node_index = findIndexByKeyValue(result['y_node_list'], 'name', gene_list_item)
if(y_node_index >= 0):
query_list_found.append(gene_list_item)
for correlation_record in result['correlation_matrix']:
correlation_value = correlation_record['correlation_value']
if( (abs(correlation_value) >= calculated_cut_off)): #or (correlation_record['x_loc'] in x_query_list_found) or (correlation_record['y_loc'] in y_query_list_found) ):
#if( (correlation_record['x_loc'] in x_query_list_found) and abs(correlation_value) < calculated_cut_off ):
# x_query_list_found.remove(correlation_record['x_loc'])
#if( (correlation_record['y_loc'] in y_query_list_found) and abs(correlation_value) < calculated_cut_off ):
# y_query_list_found.remove(correlation_record['y_loc'])
#if(abs(correlation_value) >= calculated_cut_off):
data['corr'].append(correlation_value)
data['group_id'].append(elasticId)
data['p'].append(correlation_record['p_value'])
data['var1'].append(result['x_node_list'][correlation_record['x_loc']]['name'])
data['var2'].append(result['y_node_list'][correlation_record['y_loc']]['name'])
#===============================
# CUTOFF was too stringent.
# no edges produced. Recompute
#===============================
if(len(data['corr']) < 1):
for correlation_record in result['correlation_matrix']:
correlation_value = correlation_record['correlation_value']
if((abs(correlation_value) >= 0.5)):
data['corr'].append(correlation_value)
data['group_id'].append(elasticId)
data['p'].append(correlation_record['p_value'])
data['var1'].append(result['x_node_list'][correlation_record['x_loc']]['name'])
data['var2'].append(result['y_node_list'][correlation_record['y_loc']]['name'])
df = pd.DataFrame(data, columns=['corr', 'group_id','p','var1','var2'])
result = generate_graph_from_tab_file(df, 'temp', elasticId, 'community')
for xy_item in query_list_found:
foundItem = False
for node in result['heat_map']['nodes']:
if(node['id'] == xy_item):
foundItem = True
break
if(not foundItem):
result['heat_map']['nodes'].append(
{
'bfrac': 80,
'degree': 0,
'gfrac': 80,
'rfrac': 80,
'com': 999,
'id': xy_item
}
)
return result['heat_map']
print("Got %d Hits:" % results['hits']['total'])
if(results['hits']['total'] < 1):
print 'no results'
#return return_value
def findIndexByKeyValue(obj, key, value):
for i in range(len(obj)):
if (obj[i][key] == value):
return i
return -1
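# Returns the 200th-largest correlation value (values sorted in descending order, without taking
# absolute values); used as the edge-weight cut-off so roughly 200 edges survive filtering.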
def get_top_200_weight(cor_matrix):
cor_values_array = [];
for correlation_record in cor_matrix:
cor_values_array.append(correlation_record['correlation_value'])
cor_values_array_sorted = sorted(cor_values_array, reverse=True)[:200]
#print cor_values_array_sorted
print cor_values_array_sorted[-1]
return cor_values_array_sorted[-1]
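# Fetches a cluster by id and returns its precomputed hypergeometric enrichment scores, or a
# single 'no results found' placeholder record when the cluster has none.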
def get_enrichment_from_es_by_id(elasticId):
search_body = {
'query': {
'bool': {
'must': [
{'match': {
'_id': elasticId
}}
]
}
},
'size': 1
}
results = es.search(
index = 'clusters',
body = search_body
)
if(len(results) > 0):
result = results['hits']['hits'][0]['_source']
hyper_geo_results = result['hypergeometric_scores']
if(len(hyper_geo_results) > 0):
return hyper_geo_results;
else:
return [{
'name': 'no results found',
'GO_id': '',
'pvalue': 0,
'qvalueLog': 0,
'overlap': 0,
'genes_from_list': 0,
'genes_from_GO': 0,
'description': 'no results found'
}]
def generate_graph_from_tab_file(cluster_IO,out_file_start, cluster_id, color_type='community', colormap='OrRd'):
'''
Takes a processed cluster edge list (the output of the clustering step in cluster_analysis_module,
passed in as a DataFrame via 'cluster_IO') and builds a node/link graph for each group_id,
colouring nodes by community, clustering coefficient or betweenness centrality. In this version
the graph is returned as {'clusterId': ..., 'heat_map': ...} rather than saved to a json file
named with 'out_file_start'; the file-writing and Mongo-caching calls are left commented out below.
'''
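# Hedged usage sketch (illustrative values only): the caller passes a DataFrame with the columns
# used below, e.g.
#   cluster_IO = pd.DataFrame({'corr': [0.8], 'group_id': ['<es_id>'], 'p': [0.01],
#                              'var1': ['GENE_A'], 'var2': ['GENE_B']})
# and gets back {'clusterId': <group_id>, 'heat_map': {'directed': False, 'nodes': [...], 'links': [...]}}.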
# first load in a network (edge list)
edge_list_df = cluster_IO #pd.read_csv(cluster_IO,sep='\t')
group_ids = np.unique(edge_list_df['group_id'])
for focal_group in group_ids:
print focal_group
#save_file_name = out_file_start + '_' + str(int(focal_group)) + '.json'
idx_group = (edge_list_df['group_id']==focal_group)
idx_group = list(edge_list_df['group_id'][idx_group].index)
# make a network out of it
edge_list = [(edge_list_df['var1'][i], edge_list_df['var2'][i], np.abs(edge_list_df['corr'][i])) for i in idx_group if edge_list_df['corr'][i] !=0]
#edge_list = [(edge_list_df['var1'][i], edge_list_df['var2'][i], edge_list_df['corr'][i]) for i in idx_group if edge_list_df['corr'][i] !=0]
Gsmall = nx.Graph()
Gsmall.add_weighted_edges_from(edge_list)
nodes = Gsmall.nodes()
numnodes = len(nodes)
edges=Gsmall.edges(data=True)
numedges = len(edges)
if color_type=='community':
partition = community.best_partition(Gsmall)
partition = pd.Series(partition)
col_temp = partition[Gsmall.nodes()]
# Set up json for saving
# what should the colors be??
num_communities = len(np.unique(col_temp))
color_list = plt.cm.gist_rainbow(np.linspace(0, 1, num_communities))
# blend the community colors (so that to-nodes are a mixture of all the communities they belong to)
rfrac,gfrac,bfrac=calc_community_fraction(Gsmall,Gsmall.nodes(),Gsmall.nodes(),partition,color_list)
#nodes_dict = [{"id":n,"com":col_temp[n],"degree":author_gene_bp.degree(n)} for n in nodes]
nodes_dict = [{"id":n,"com":col_temp[n],"degree":Gsmall.degree(n),
"rfrac":rfrac[n]*255,"gfrac":gfrac[n]*255,"bfrac":bfrac[n]*255} for n in nodes]
elif color_type=='clustering_coefficient':
cmap= plt.get_cmap(colormap)
rfrac,gfrac,bfrac=calc_clustering_coefficient(Gsmall,cmap)
nodes_dict = [{"id":n,"com":0,"degree":Gsmall.degree(n),
"rfrac":rfrac[n]*255,"gfrac":gfrac[n]*255,"bfrac":bfrac[n]*255} for n in nodes]
elif color_type=='betweenness_centrality':
cmap= plt.get_cmap(colormap)
rfrac,gfrac,bfrac=calc_betweenness_centrality(Gsmall,cmap)
nodes_dict = [{"id":n,"com":0,"degree":Gsmall.degree(n),
"rfrac":rfrac[n]*255,"gfrac":gfrac[n]*255,"bfrac":bfrac[n]*255} for n in nodes]
# partition = community.best_partition(Gsmall)
# partition = pd.Series(partition)
# col_temp = partition[Gsmall.nodes()]
# Set up json for saving
# what should the colors be??
# num_communities = len(np.unique(col_temp))
# color_list = plt.cm.gist_rainbow(np.linspace(0, 1, num_communities))
# blend the community colors (so that to-nodes are a mixture of all the communities they belong to)
# rfrac,gfrac,bfrac=calc_community_fraction(Gsmall,Gsmall.nodes(),Gsmall.nodes(),partition,color_list)
# save network in json format
# nodes = Gsmall.nodes()
# numnodes = len(nodes)
# edges=Gsmall.edges(data=True)
# numedges = len(edges)
#nodes_dict = [{"id":n,"com":col_temp[n],"degree":author_gene_bp.degree(n)} for n in nodes]
# nodes_dict = [{"id":n,"com":col_temp[n],"degree":Gsmall.degree(n),
# "rfrac":rfrac[n]*255,"gfrac":gfrac[n]*255,"bfrac":bfrac[n]*255} for n in nodes]
node_map = dict(zip(nodes,range(numnodes))) # map to indices for source/target in edges
edges_dict = [{"source":node_map[edges[i][0]], "target":node_map[edges[i][1]], "weight":edges[i][2]['weight']} for i in range(numedges)]
#==========================================
# Restore the correlation sign (+/-)
#
# create a list of edges that should be
# negative using [source, target] format
#==========================================
edge_list_negative_values = [(node_map[edge_list_df['var1'][i]], node_map[edge_list_df['var2'][i]]) for i in idx_group if edge_list_df['corr'][i] < 0]
for edge_dict_item in edges_dict:
for e_neg in edge_list_negative_values:
if(e_neg[0] == edge_dict_item['source'] and e_neg[1] == edge_dict_item['target']) or (e_neg[1] == edge_dict_item['source'] and e_neg[0] == edge_dict_item['target']):
edge_dict_item['weight'] = -1 * np.abs(edge_dict_item['weight'])
import json
json_graph = {"directed": False, "nodes": nodes_dict, "links":edges_dict}
print 'Edge count: ' + str(len(edges_dict))
client = pymongo.MongoClient()
db = client.cache
heat_maps = db.heat_map_graph
a = {
'clusterId': focal_group,
'heat_map': json_graph#json.dumps(json_graph) #heat_map_ordered_transposed
}
#heat_maps.save(a)
return a
#json.dump(json_graph,open(save_file_name,'w'))
def calc_clustering_coefficient(G,cmap):
# this function calculates the clustering coefficient of each node, and returns colors corresponding to these values
local_CC = nx.clustering(G,G.nodes())
# iterate in G.nodes() order so the scaled values line up with the Series index below
local_CC_scale = [round(local_CC[n]*float(255)) for n in G.nodes()]
local_CC_scale = pd.Series(local_CC_scale,index=G.nodes())
rfrac = [cmap(int(x))[0] for x in local_CC_scale]
gfrac = [cmap(int(x))[1] for x in local_CC_scale]
bfrac = [cmap(int(x))[2] for x in local_CC_scale]
rfrac = pd.Series(rfrac,index=G.nodes())
gfrac = pd.Series(gfrac,index=G.nodes())
bfrac = pd.Series(bfrac,index=G.nodes())
return rfrac,gfrac,bfrac
def calc_betweenness_centrality(G,cmap):
# this function calculates the betweenness centrality of each node, and returns colors corresponding to these values
local_BC = nx.betweenness_centrality(G)
# iterate in G.nodes() order so the scaled values line up with the Series index below
local_BC_scale = [round(local_BC[n]*float(255)) for n in G.nodes()]
local_BC_scale = pd.Series(local_BC_scale,index=G.nodes())
rfrac = [cmap(int(x))[0] for x in local_BC_scale]
gfrac = [cmap(int(x))[1] for x in local_BC_scale]
bfrac = [cmap(int(x))[2] for x in local_BC_scale]
rfrac = pd.Series(rfrac,index=G.nodes())
gfrac = pd.Series(gfrac,index=G.nodes())
bfrac = pd.Series(bfrac,index=G.nodes())
return rfrac,gfrac,bfrac
# this function calculates fraction of to-node connections belonging to each community
def calc_community_fraction(G,to_nodes,from_nodes, from_nodes_partition, color_list):
# set color to most populous community
degree = G.degree(to_nodes)
rfrac,gfrac,bfrac=pd.Series(index=G.nodes()),pd.Series(index=G.nodes()),pd.Series(index=G.nodes())
for t in to_nodes:
t_neighbors = G.neighbors(t)
t_comms = [from_nodes_partition[i] for i in t_neighbors]
t_comms = pd.Series(t_comms)
unique_comms = t_comms.unique()
num_unique_comms = len(unique_comms)
num_n = pd.Series(index=unique_comms)
for n in unique_comms:
num_n[n] = sum(t_comms==n)
# find max num_n
color_max = color_list[num_n.argmax()][0:3]
# how much is shared by other colors?
#print(num_n)
frac_shared = 1-np.max(num_n)/np.sum(num_n)
# darken the color by this amount
#color_dark = shade_color(color_max,-frac_shared*100)
color_dark = (color_max[0]*(1-frac_shared), color_max[1]*(1-frac_shared), color_max[2]*(1-frac_shared))
rfrac[t] = color_dark[0]
gfrac[t] = color_dark[1]
bfrac[t] = color_dark[2]
# fill in the from_nodes colors
for f in from_nodes:
f_group = from_nodes_partition[f]
rfrac[f] = color_list[f_group][0]
gfrac[f] = color_list[f_group][1]
bfrac[f] = color_list[f_group][2]
return rfrac,gfrac,bfrac
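# Dumps every cluster id in the 'clusters_tcga_louvain' doc type to all_cluster_ids.json:
# one id per line, followed by a blank block and a single comma-separated, quoted id string.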
def get_all_cluster_ids():
search_body = {
'fields': ['_id'],
'query' : {
'match_all': {}
},
'size': 70000
}
result = es.search(
index = 'clusters',
doc_type = 'clusters_tcga_louvain',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
hit_ids = ''
hit_ids_array = []
for hit in result['hits']['hits']:
hit_ids += '"' + hit['_id'] + '",'
hit_ids_array.append(hit['_id'])
if(len(hit_ids) > 0):
hit_ids = hit_ids[:-1]
of = open('all_cluster_ids.json', 'w')
i = 0
for cluster_id in hit_ids_array:
of.write(cluster_id + '\n')
of.write('\n')
of.write('\n')
of.write('\n')
of.write('\n')
of.write('\n')
of.write('\n')
of.write(hit_ids + '\n')
of.close()
#print hit_ids_array
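# Diffs thumbs_master_list.txt against thumbs_partial_list.txt and writes the cluster ids that
# still need thumbnails to thumbnails_leftovers.txt as one quoted, comma-separated string.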
def get_remaining_thumbnails():
all_thumbs = [];
with open("thumbs_master_list.txt", "r") as f:
for line in f:
all_thumbs.append(line)
partial_thumbs = [];
with open("thumbs_partial_list.txt", "r") as f:
for line in f:
partial_thumbs.append(line)
#all_all = [set(all_thumbs), set(partial_thumbs)]
leftovers = list(set(all_thumbs) - set(partial_thumbs))
print len(leftovers)
of = open('thumbnails_leftovers.txt', 'w')
i = 0
id_array_string = ''
for cluster_id in leftovers:
id_array_string += '"' + cluster_id.rstrip() + '",'
of.write(id_array_string)
of.close()
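# Experimental scratch helper: rescales a hard-coded correlation matrix and builds a small
# directed graph from a binary adjacency matrix; it currently returns nothing.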
def transform_matrix_to_graph():
my_matrix = [
[-0.37340000000000001, 0.0, -0.37459999999999999, -0.39660000000000001, -0.4123, -0.36890000000000001],
[0.0, 0.31709999999999999, 0.31340000000000001, 0.34210000000000002, 0.0, 0.32429999999999998],
[-0.32690000000000002, -0.40189999999999998, 0.50939999999999996, 0.54959999999999998, 0.57179999999999997, 0.57179999999999997],
[-0.31030000000000002, -0.41099999999999998, -0.38729999999999998, 0.0, -0.30990000000000001, 0.0],
[0.30869999999999997, 0.0, 0.30330000000000001, 0.0, 0.0, 0.0, 0.34000000000000002, 0.0, 0.34210000000000002],
[-0.43240000000000001, -0.3896, -0.39900000000000002, 0.74729999999999996, 0.85919999999999996, 0.78000000000000003]
]
# rescale the matrix in place; rebinding the loop variable (d2 = ...) would leave my_matrix unchanged
for i, row in enumerate(my_matrix):
for j, d2 in enumerate(row):
my_matrix[i][j] = 200 - d2 * 100
my_matrix2 = [
[1, 0, 1, 1, 0, 1],
[1, 0, 0, 1, 1, 0],
[1, 0, 1, 0, 1, 1],
[1, 0, 1, 1, 1, 0],
[1, 0, 1, 0, 0, 1],
[1, 0, 1, 1, 0, 0]
]
A = np.array(my_matrix2)
G = nx.DiGraph(A)
x_nodes = ['A','B','C','D','E','F',]
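# Test helper: collects the genes annotated with 'expected_annotation' from the Mongo GO
# collection, runs the cluster search with them, and reports whether the PATHWAYS tab of the
# result contains that GO id.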
def test_cluster_search_helper(expected_annotation):
tr = TermAnalyzer()
#expected_annotation = 'GO:0050877'
client = pymongo.MongoClient()
db = client.go
go_search = db.genes
go_search_found = go_search.find({'go': expected_annotation})
query_genes = ''
if go_search_found is not None:
for go_gene in go_search_found:
termsClassified = tr.identify_term(go_gene['gene'])
for item_type in termsClassified:
if(item_type['type'] == 'GENE'):
if('geneSymbol' in item_type):
termGeneSymbol = item_type['geneSymbol']
query_genes += termGeneSymbol.upper() + ','
if(len(query_genes) > 0):
query_genes = query_genes[0:-1]
test_template = {
'query_genes': query_genes,
'expected_annotation': expected_annotation
}
search_results = get_cluster_search_mapped(query_genes, 99)
print dumps(search_results)
return_result = False;
for search_tab in search_results:
if(search_tab['searchTab'] == 'PATHWAYS'):
if(expected_annotation in search_tab['annotation_filter_all_go_id']):
return_result = True;
print('found it')
print search_tab['annotation_filter_all_go_id']
else:
return_result = False;
print('No')
print search_tab['annotation_filter_all_go_id']
return return_result
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/SearchPathwaysTab.py",
"copies": "1",
"size": "74638",
"license": "mit",
"hash": -3359700570773812700,
"line_mean": 38.3660337553,
"line_max": 293,
"alpha_frac": 0.5175245853,
"autogenerated": false,
"ratio": 3.7806706514031,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47981952367031,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
from collections import Counter
from app import PubMed
from models.TermResolver import TermAnalyzer
from elasticsearch import Elasticsearch
from app import elastic_search_uri
from app import path_to_cluster_file
from app import path_to_DB_file
from app import util
import numpy as np
import networkx as nx
import pandas as pd
import community
import math
import matplotlib.pyplot as plt
import pymongo
import plotly.plotly as py
from plotly.graph_objs import *
from io import StringIO
from pandas import DataFrame
import hashlib
from bson.json_util import dumps
#from RestBroker import start_thumbnail_generator
import matplotlib.pyplot as plt
import matplotlib.colors as mpclrs
import seaborn
import random
import json
import time
from multiprocessing import Pool, cpu_count, Manager, Process
import copy
from random import randint
from app import HeatMaps
from ndex.networkn import NdexGraph
from ndex.client import Ndex
# latex rendering of text in graphs
import matplotlib as mpl
mpl.rc('text', usetex = False)
mpl.rc('font', family = 'serif')
import app.datascience.drug_gene_heatprop
import imp
imp.reload(app.datascience.drug_gene_heatprop)
es = Elasticsearch([elastic_search_uri],send_get_body_as='POST',timeout=30) # Prod Clustered Server
#============================
#============================
# CLUSTER VIZ
#============================
#============================
def convert_network_type(network_type):
switcher = {
'mutation_vs_mutation': 'DNA x DNA',
'rnaseq_vs_rnaseq': 'RNA x RNA',
}
return switcher.get(network_type, network_type)
def get_geneSuperList(queryTermArray, sorted_query_list):
returnValue = []
#sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
for queryTerm in queryTermArray:
#should_match.append( { 'match': {network_info['matchField']: queryTerm} })
boost_value_append = get_boost_value(sorted_query_list['results'], queryTerm)
#should_match.append({"match": {"node_list.node.name":{"query": queryTerm,"boost": boost_value_append}}})
returnValue.append({'queryTerm': queryTerm, 'boostValue': boost_value_append})
return returnValue
def get_boost_value(boostArray, idToCheck):
for boostItem in boostArray:
if(boostItem['id'] == idToCheck):
return boostItem['normalizedValue']
return 0
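# Visualisation entry point: runs heat propagation (drug_gene_heatprop.cluster_genes_heatprop)
# over the first cluster's graph and, when the propagated subgraph has more than three edges,
# renders it via generate_graph_from_tab_file; otherwise it falls back to the plain heat-map
# graph for that cluster.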
def get_heat_prop_cluster_viz(seed_genes, esId):
seed_genes_array = seed_genes.split(',')
es_id_array = esId.split(',')
cluster_x_y_z = load_x_y_z_cluster_data(es_id_array[0])
cluster_heatprop_targets = app.datascience.drug_gene_heatprop.cluster_genes_heatprop(seed_genes_array,cluster_x_y_z)
if(len(cluster_heatprop_targets.edges()) > 3):
print 'heat prop'
main_data_tuples = []
edge_list_negative_values = []
for (u,v,d) in cluster_heatprop_targets.edges(data=True):
main_data_tuples.append((u, v, np.abs(d['weight'])))
if(d['weight'] < 0):
edge_list_negative_values.append((u, v))
result = generate_graph_from_tab_file(main_data_tuples, edge_list_negative_values, 'temp', esId, 'clustering_coefficient') #'community')
return result['heat_map']
else:
print 'no heat prop'
return get_heatmap_graph_from_es_by_id(es_id_array[0], seed_genes, 'clusters_tcga_louvain', 0.5)
def get_heat_prop_from_gene_list_by_cluster_source(seed_genes, cluster_x_y_z):
seed_genes_array = seed_genes.split(',')
gene_drug_df = app.datascience.drug_gene_heatprop.drug_gene_heatprop(seed_genes_array,cluster_x_y_z,plot_flag=False)
#print gene_drug_df.head(25)
gene_drug_json = gene_drug_df.reset_index().to_dict(orient='index')
return gene_drug_json
def get_drugbank_name(db_id, drugbank_mongo_collection):
drug_bank_found = drugbank_mongo_collection.find_one({'drug_bank_id': db_id})
drug_bank_desc = ''
if(drug_bank_found is not None):
drug_bank_desc = drug_bank_found['drug_desc']
else:
drug_bank_desc = db_id
return drug_bank_desc
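# Inferred-drug search entry point. Results are cached in Mongo (cache.inferred_drug_search,
# keyed by a hash of the query gene list); on a cache miss the clusters are heat-propagated
# serially (only the first couple, see the note below) and the per-cluster drug hits are merged
# by DrugBank id, keeping the heat rank per disease. A placeholder record is returned when no
# cluster yields drugable genes.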
def experiment_1(seed_genes, esIds):
computed_hash = util.compute_query_list_hash(seed_genes)
#print computed_hash
return_value_array = []
client = pymongo.MongoClient()
db = client.identifiers
drugbank_collection = db.drugbank
inferred_drug_search = db.inferred_drug_search
inferred_drug_search = client.cache.inferred_drug_search
inferred_drug_search_found = inferred_drug_search.find_one({'searchId': computed_hash})
if(inferred_drug_search_found is not None):
return_value_array = inferred_drug_search_found['cached_hits']
else:
seed_genes_array = seed_genes.split(',')
es_id_array = esIds.split(',')
start_time = time.time()
print 'Start: ' + str(start_time)
print '-'
print '-'
print '-'
#================================
# Run the heat prop in parallel
#================================
#manager = Manager()
#return_dict = manager.dict()
#jobs = []
#for es_id in es_id_array:
# p = Process(target=get_heat_prop_from_es_id, args=(es_id, seed_genes_array, False, return_dict))
# jobs.append(p)
# p.start()
#for proc in jobs:
# proc.join()
#inferred_drug_group_array = return_dict.values()
return_dict = []
inferred_drug_group_array = {}
count = 0
for es_id in es_id_array:
# Due to bugs when calling ElasticSearch from Process we are
# just running the first 2 clusters in serial
if(count > 1):
if(len([rd for rd in return_dict if rd is not None]) > 1):
break;
return_dict.append(get_heat_prop_from_es_id(es_id, seed_genes_array, False, None))
count += 1
inferred_drug_group_array = [rd for rd in return_dict if rd is not None]
if(len(inferred_drug_group_array) < 1):
return [
{
"disease_type": "No results",
"genes": [
"NONE"
],
"gene_count": 1,
"value": [
{
"drug_name": "n/a",
"gene": "NONE",
"doc_id": "0",
"drug_id": "DB00000"
}
],
"heat_value": 0.004182,
"key": "No results",
"es_id": "2040020397",
"drug_bank_id": "DB00000",
"diseases_with_rank": [
{
"disease": "No results",
"rank": 18
}
],
"heat_rank": 18
}
]
#print dumps(inferred_drug_group_array)
merged_by_rank = []
annotate_cluster_info = {}
for a in inferred_drug_group_array:
for b in a['inferred_drugs']:
found_match = False
for c in merged_by_rank:
if(c['drug_bank_id'] == b['drug_bank_id']):
disease_type_found = False
for d in c['diseases_with_rank']:
if(d['disease'] == b['disease_type']):
disease_type_found = True
if(not disease_type_found):
c['diseases_with_rank'].append(
{
'disease': b['disease_type'],
'rank': b['heat_rank']
}
)
found_match = True
if(not found_match):
b1 = copy.deepcopy(b)
b1['diseases_with_rank'] = [
{
'disease': b1['disease_type'],
'rank': b1['heat_rank']
}
]
merged_by_rank.append(b1)
annotate_cluster_info[a['es_id']] = a['annotate_cluster_data']
return_value_array = {'inferred_drugs': merged_by_rank, 'annotate_cluster_info': annotate_cluster_info} #return_dict.values()
print '-'
print '-'
print '-'
print 'Finished: ' + str(start_time - time.time())
inferred_drug_search.save(
{
'searchId': computed_hash,
'cached_hits': return_value_array
}
)
client.close()
return return_value_array
def get_heat_prop_from_gene_list_loop(seed_genes, esIds):
seed_genes_array = seed_genes.split(',')
es_id_array = esIds.split(',')
return_value_array = []
start_time = time.time()
print 'Start: ' + str(start_time)
print '-'
print '-'
print '-'
for es_id in es_id_array:
return_value = get_heat_prop_from_es_id(es_id, seed_genes_array)
if(return_value is not None):
return return_value
print '-'
print '-'
print '-'
print 'Finished: ' + str(start_time - time.time())
return return_value_array
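# Runs heat propagation for one cluster: seeds the query genes, maps 'hot' genes to DrugBank
# entries, normalises per-gene heat for cluster annotation, and (optionally) builds an evidence
# subgraph of the hot genes coloured by heat. Returns None when fewer than two drugable genes
# are found.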
def get_heat_prop_from_es_id(es_id, seed_genes_array, include_evidence_graph=False, return_dict=None):
client = pymongo.MongoClient()
db = client.identifiers
drugbank_collection = db.drugbank
print es_id
disease_type = get_cluster_disease_by_es_id(es_id)
cluster_data = load_x_y_z_cluster_data(es_id)
cluster_x_y_z = cluster_data['cluster']
print 'start heat prop'
gene_drug_df = app.datascience.drug_gene_heatprop.drug_gene_heatprop(seed_genes_array,cluster_x_y_z,plot_flag=False)
print 'finish heat prop'
gene_drug_json = gene_drug_df.reset_index().to_dict(orient='index')
#print dumps(gene_drug_json)
one_gene_many_drugs = []
hot_genes = []
hot_genes_with_heat_value = []
hot_genes_values = []
annotate_cluster_data = []
annotate_cluster_min = 9999
annotate_cluster_max = 0
for key, value in gene_drug_json.iteritems():
if(len(value['drugs']) > 0 and value['index'] not in seed_genes_array):
if(value['heat_value'] > 0.00001):
one_gene_many_drugs.append({
'gene': value['index'],
'drugs': value['drugs'],
'heat_value': float("{0:f}".format(value['heat_value'])),
'heat_rank': value['heat_rank']
})
annotate_cluster_data.append({
'gene': value['index'],
'drugable': len(value['drugs']) > 0,
'heat_value': value['heat_value']
})
if(value['heat_value'] < annotate_cluster_min):
annotate_cluster_min = value['heat_value']
if(value['heat_value'] > annotate_cluster_max):
annotate_cluster_max = value['heat_value']
if(value['heat_value'] > 0.00001):
hot_genes.append(value['index'])
drugs_array_desc = []
drugs_for_pop_up = ''
node_info_for_pop_up = ''#'<span style="font-weight: bold; margin-bottom: 5px;">Drugs associated with ' + str(value['index']) + ':</span><br><div style="margin-left: 10px;">'
for drug_id in value['drugs']:
drugs_array_desc.append(get_drugbank_name(drug_id, drugbank_collection))
drugs_for_pop_up += get_drugbank_name(drug_id, drugbank_collection) + '\n'
node_info_for_pop_up += get_drugbank_name(drug_id, drugbank_collection) + '<br>'
node_info_for_pop_up += '</div>'
#print 'gene_id: ' + value['index'] + ' heat: ' + str(value['heat_value'] * 100000)
if(value['index'] not in seed_genes_array):
hot_genes_values.append(value['heat_value'] * 100000)
hot_genes_with_heat_value.append(
{
'gene_id': value['index'],
'heat_value': value['heat_value'] * 100000,
'drugable': len(drugs_for_pop_up) > 0,
'seed_gene': False,
'drugs': 'do not use',#value['index'] + '\nDrugs targeting this gene:\n\n' + drugs_for_pop_up,
'node_info': value['index'],
'pop_up_info': node_info_for_pop_up
}
)
else:
hot_genes_with_heat_value.append(
{
'gene_id': value['index'],
'heat_value': value['heat_value'] * 100000,
'drugable': len(drugs_for_pop_up) > 0,
'seed_gene': True,
'drugs': 'do not use',#value['index'] + '\n[SEED GENE]\nFor drugs that are directly \ntargeting this query gene \nsee results above', # This is a seed gene. We are not showing direct drugs only inferred drugs.
'node_info': value['index'],
'pop_up_info': node_info_for_pop_up
}
)
else:
hot_genes_with_heat_value.append(
{
'gene_id': value['index'],
'seed_gene': False, # seed genes are intrinsically hot
'heat_value': 0.0
}
)
#print annotate_cluster_min
#annotate_cluster_min = math.log(annotate_cluster_min)
#annotate_cluster_max = math.log(annotate_cluster_max)
if((annotate_cluster_max > annotate_cluster_min) and (annotate_cluster_max > 0)):
for annotate_item in annotate_cluster_data:
annotate_item['normalized_heat'] = (((annotate_item['heat_value']) * 1.0) - (annotate_cluster_min * 1.0)) / ((annotate_cluster_max * 1.0) - (annotate_cluster_min * 1.0))
else:
for annotate_item in annotate_cluster_data:
annotate_item['normalized_heat'] = 1.0
if(len(hot_genes_values) < 1):
max_hot_genes_value = 0
else:
max_hot_genes_value = max(hot_genes_values)
#========================================
# Results are one gene to many drugs;
# regroup them with the drug as the key
#========================================
if(len(one_gene_many_drugs) < 2):
return None
else:
one_drug_many_genes = []
for gene_drugs in one_gene_many_drugs:
found_drug = False
for drug in gene_drugs['drugs']:
for match_this_drug in one_drug_many_genes:
if(drug == match_this_drug['drug_bank_id']):
drug_bank_desc = get_drugbank_name(drug, drugbank_collection)
match_this_drug['genes'].append(gene_drugs['gene'])
max_heat_map_value = match_this_drug['heat_value']
if(gene_drugs['heat_value'] > max_heat_map_value):
max_heat_map_value = gene_drugs['heat_value']
max_heat_rank_value = match_this_drug['heat_rank']
if(gene_drugs['heat_rank'] > max_heat_rank_value):
max_heat_rank_value = gene_drugs['heat_rank']
match_this_drug['gene_count'] += 1
match_this_drug['heat_value'] = max_heat_map_value
match_this_drug['heat_rank'] = max_heat_rank_value
match_this_drug['value'].append({
'drug_name': drug_bank_desc,
'gene': gene_drugs['gene'],
'doc_id': '0',
'drug_id': drug
})
found_drug = True
break
if(not found_drug):
drug_bank_desc = get_drugbank_name(drug, drugbank_collection)
one_drug_many_genes.append({
'drug_bank_id': drug,
'heat_value': float("{0:f}".format(gene_drugs['heat_value'])),
'heat_rank': gene_drugs['heat_rank'],
'genes': [gene_drugs['gene']],
'value': [
{
'drug_name': drug_bank_desc,
'gene': gene_drugs['gene'],
'doc_id': '0',
'drug_id': drug
}
],
'key': drug_bank_desc,
'gene_count': 1,
'disease_type': disease_type,
'es_id': es_id
})
top_drugs_sorted_list = one_drug_many_genes #sorted(one_drug_many_genes, key=lambda k: k['gene_count'])
H = cluster_x_y_z.subgraph(hot_genes)
nodes = H.nodes()
numnodes = len(nodes)
edges=H.edges(data=True)
numedges = len(edges)
print 'Edges len: ' + str(len(edges))
color_list = plt.cm.OrRd(np.linspace(0, 1, 100))
inferred_drug_graph = {
'directed':False,
'nodes':[],
'links':[]
}
if(include_evidence_graph):
nodes_dict = []
log_norm_value = mpclrs.LogNorm(0, max_hot_genes_value, clip=False)
for n in nodes:
found_hot_gene = False
for hv in hot_genes_with_heat_value:
if(hv['gene_id'] == n):
color_list_index = math.ceil((hv['heat_value']/max_hot_genes_value) * 100) - 1
font_color = 'black'
if(color_list_index > 99):
color_list_index = 99
if(color_list_index > 40):
font_color = 'white'
if(hv['drugable']):
nodes_dict.append({"id":n,"com":0, "node_type": "DRUGABLE", "drugs": hv['drugs'], 'node_info': hv['node_info'], 'pop_up_info': hv['pop_up_info'], 'seed_gene': hv['seed_gene'], 'font_color': font_color, "degree":H.degree(n),"rfrac":int(color_list[color_list_index][0] * 255),"gfrac":int(color_list[color_list_index][1] * 255),"bfrac":int(color_list[color_list_index][2] * 255)} )
else:
nodes_dict.append({"id":n,"com":0, "node_type": "NORMAL", "drugs": [], 'node_info': hv['node_info'], 'pop_up_info': 'No drugs available for this gene', 'seed_gene': hv['seed_gene'], 'font_color': font_color, "degree":H.degree(n),"rfrac":int(color_list[color_list_index][0] * 255),"gfrac":int(color_list[color_list_index][1] * 255),"bfrac":int(color_list[color_list_index][2] * 255)} )
found_hot_gene = True
break
if(not found_hot_gene):
nodes_dict.append({"id":n,"com":0,"degree":H.degree(n),"rfrac":color_list[0][0] * 255,"gfrac":color_list[0][1] * 255,"bfrac":color_list[0][2] * 255} )
node_map = dict(zip(nodes,range(numnodes))) # map to indices for source/target in edges
edges_dict = [{"source":node_map[edges[i][0]], "target":node_map[edges[i][1]], "weight":edges[i][2]['weight']} for i in range(numedges)]
inferred_drug_graph = {
'directed':False,
'nodes':nodes_dict,
'links':edges_dict
}
if(return_dict is not None):
return_dict[es_id] = es_id
if(top_drugs_sorted_list is not None):
if(len(top_drugs_sorted_list) >= 25):
if(return_dict is not None):
return_dict[es_id] = {'inferred_drugs': top_drugs_sorted_list[:24], 'evidence_graph': inferred_drug_graph, 'disease_type': disease_type, 'annotate_cluster_data': annotate_cluster_data, 'es_id': es_id}
return {'inferred_drugs': top_drugs_sorted_list[:24], 'evidence_graph': inferred_drug_graph, 'disease_type': disease_type, 'annotate_cluster_data': annotate_cluster_data, 'es_id': es_id}
else:
if(return_dict is not None):
return_dict[es_id] = {'inferred_drugs': top_drugs_sorted_list, 'evidence_graph': inferred_drug_graph, 'disease_type': disease_type, 'annotate_cluster_data': annotate_cluster_data, 'es_id': es_id}
return {'inferred_drugs': top_drugs_sorted_list, 'evidence_graph': inferred_drug_graph, 'disease_type': disease_type, 'annotate_cluster_data': annotate_cluster_data, 'es_id': es_id}
else:
if(return_dict is not None):
return_dict[es_id] = {'inferred_drugs': [{'nodes': [], 'edges':[]}], 'evidence_graph': {'directed':False,'nodes':[],'links':[]}, 'disease_type': disease_type, 'annotate_cluster_data': annotate_cluster_data, 'es_id': es_id}
return {'inferred_drugs': [{'nodes': [], 'edges':[]}], 'evidence_graph': {'directed':False,'nodes':[],'links':[]}, 'disease_type': disease_type, 'annotate_cluster_data': annotate_cluster_data, 'es_id': es_id}
def get_cluster_disease_by_es_id(es_id):
search_body = {
'fields': [
'network_full_name'
],
'query': {
'bool': {
'must': [
{ 'match': {'_id': es_id} }
]
}
}
}
result = es.search(
index = 'clusters',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
if(len(result['hits']['hits']) > 0):
hit = result['hits']['hits'][0]
if('network_full_name' in hit["fields"]):
if(len(hit["fields"]["network_full_name"]) > 0):
return hit["fields"]["network_full_name"][0]
else:
return 'unknown'
def get_heat_prop_from_gene_list(seed_genes, esId):
seed_genes_array = seed_genes.split(',')
es_id_array = esId.split(',')
top_drugs_sorted_list = None
for es_id in es_id_array:
cluster_x_y_z = load_x_y_z_cluster_data(es_id)
gene_drug_df = app.datascience.drug_gene_heatprop.drug_gene_heatprop(seed_genes_array,cluster_x_y_z,plot_flag=False)
#print gene_drug_df.head(25)
gene_drug_json = gene_drug_df.reset_index().to_dict(orient='index')
one_gene_many_drugs = []
for key, value in gene_drug_json.iteritems():
if(len(value['drugs']) > 0):
one_gene_many_drugs.append({
'gene': value['index'],
'drugs': value['drugs'],
'heat_value': value['heat_value']
})
#=========================================
# Results are one gene to many drugs;
# regroup them with the drug as the key
#=========================================
client = pymongo.MongoClient()
db = client.identifiers
drugbank_collection = db.drugbank
one_drug_many_genes = []
for gene_drugs in one_gene_many_drugs:
found_drug = False
for drug in gene_drugs['drugs']:
for match_this_drug in one_drug_many_genes:
if(drug == match_this_drug['drug_bank_id']):
drug_bank_found = drugbank_collection.find_one({'drug_bank_id': drug})
drug_bank_desc = ''
if(drug_bank_found is not None):
drug_bank_desc = drug_bank_found['drug_desc']
else:
drug_bank_desc = drug
match_this_drug['genes'].append(gene_drugs['gene'])
max_heat_map_value = match_this_drug['heat_value']
if(gene_drugs['heat_value'] > max_heat_map_value):
max_heat_map_value = gene_drugs['heat_value']
match_this_drug['gene_count'] += 1
match_this_drug['heat_value'] = max_heat_map_value
match_this_drug['value'].append({
'drug_name': drug_bank_desc,
'gene': gene_drugs['gene'],
'doc_id': '0',
'drug_id': drug
})
found_drug = True
break
if(not found_drug):
drug_bank_found = drugbank_collection.find_one({'drug_bank_id': drug})
drug_bank_desc = ''
if(drug_bank_found is not None):
drug_bank_desc = drug_bank_found['drug_desc']
else:
drug_bank_desc = drug
one_drug_many_genes.append({
'drug_bank_id': drug,
'heat_value': gene_drugs['heat_value'],
'genes': [gene_drugs['gene']],
'value': [
{
'drug_name': drug_bank_desc,
'gene': gene_drugs['gene'],
'doc_id': '0',
'drug_id': drug
}
],
'key': drug_bank_desc,
'gene_count': 1
})
# {
# 'drug': drug,
# 'genes': [gene_drugs['gene']],
# 'drug_bank_desc': drug_bank_desc
# })
top_drugs_sorted_list = sorted(one_drug_many_genes, key=lambda k: k['gene_count'])
#print top_drugs_sorted_list[-15:]
#print dumps(one_drug_many_genes)
return_value = []
return top_drugs_sorted_list[-25:]
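# Second, richer variant of the heat-map builder: in addition to the top-weighted edges (capped
# at filter_count_setting1) it keeps extra edges for query genes so queried nodes stay connected,
# tracks negative correlations separately, and tags node ids with their x/y list type.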
def get_heatmap_graph_from_es_by_id(elasticId, gene_list, search_type, cut_off_filter_value):
filter_count_setting1 = 400
client = pymongo.MongoClient()
db = client.cache
gene_list_array = gene_list.split(',')
query_list_found = []
x_edge_overlap = []
y_edge_overlap = []
x_edge_non_overlap = []
y_edge_non_overlap = []
heat_map_graph = db.heat_map_graph
heat_map_found = heat_map_graph.find_one({'clusterId': elasticId})
if(heat_map_found is not None):
return heat_map_found['heat_map']
else:
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'must': [
{'match': {
'_id': elasticId
}}
]
}
},
'size': 1
}
results = es.search(
index = 'clusters',
# doc_type = search_type,
body = search_body
)
if(len(results['hits']['hits']) > 0):
result = results['hits']['hits'][0]['_source']
#if(result['correlation_matrix_degree'] > 300000):
if(len(result['correlation_matrix']) < 1):
resultx = {
'directed':False,
'nodes':[{'bfrac':40.800000000000004,'degree':138,'gfrac':0,'rfrac':255,'com':0,'id':'WNT2B_v'},{'bfrac':191.25,'degree':2,'gfrac':0,'rfrac':255,'com':1,'id':'SLC6A6'}],
'links':[{'source':0,'target':1,'weight':1}]
}
return resultx
calculated_cut_off = 0.5
#if(len(result['correlation_matrix']) > 1000):
# calculated_cut_off = 0.87
#else:
# calculated_cut_off = 0.5 + (0.37 * (len(result['correlation_matrix'])/1000))
cluster_IO = "corr group_id p var1 var2 \n"
x_matrix_width = len(result['x_node_list'])-1
y_matrix_width = len(result['y_node_list'])-1
data = {
'corr': [],
'group_id': [],
'p': [],
'var1': [],
'var2': []
}
calculated_cut_off = get_top_200_weight(result['correlation_matrix'])
print calculated_cut_off
#=================================
# GET QUERY LIST OVERLAP
#=================================
for gene_list_item in gene_list_array:
x_node_index = findIndexByKeyValue(result['x_node_list'], 'name', gene_list_item)
y_node_index = findIndexByKeyValue(result['y_node_list'], 'name', gene_list_item)
if(x_node_index >= 0):
query_list_found.append(gene_list_item + '_' + result['x_node_list_type'])
else:
if(y_node_index >= 0):
query_list_found.append(gene_list_item + '_' + result['y_node_list_type'])
x_edge_overlap.append(x_node_index)
#===================================
# GET QUERY LIST OVERLAP FOR EDGES
# i.e. EDGE INDEX from:0 to:1
#===================================
if(x_node_index >= 0):
x_edge_overlap.append(x_node_index)
if(y_node_index >= 0):
y_edge_overlap.append(y_node_index)
#===================================
# GENERATE NODES & EDGES DATAFRAME
# BASED ON EDGE CUT_OFF
#===================================
main_data_tuples = []
all_data_tuples = []
query_nodes_data_tuples = []
subG_nodes_data_tuples = []
found_x_query_node_edge = []
found_y_query_node_edge = []
core_nodes = []
edge_counter = 0
edge_list_negative_values = []
for correlation_record in result['correlation_matrix']:
correlation_value = correlation_record['correlation_value']
all_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'], np.abs(correlation_record['correlation_value'])))
if( (abs(correlation_value) >= calculated_cut_off) and edge_counter < filter_count_setting1): #150):
edge_counter += 1
#if(edge_counter % 20 == 0):
# print edge_counter
main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'], np.abs(correlation_record['correlation_value'])))
if(result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'] not in core_nodes):
core_nodes.append(result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'])
if(result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'] not in core_nodes):
core_nodes.append(result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'])
if(correlation_value < 0):
edge_list_negative_values.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type']))
else:
if((correlation_record['x_loc'] in x_edge_overlap) or (correlation_record['y_loc'] in y_edge_overlap)):
#edge_counter += 1
#if(edge_counter % 20 == 0):
# print edge_counter
#main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'], result['y_node_list'][correlation_record['y_loc']]['name'], np.abs(correlation_record['correlation_value'])))
query_nodes_data_tuples.append(correlation_record)
if(correlation_value < 0):
edge_list_negative_values.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type']))
print 'main_data_tuples: ' + str(len(main_data_tuples))
for correlation_record in query_nodes_data_tuples:
correlation_value = correlation_record['correlation_value']
if((correlation_record['x_loc'] in x_edge_overlap) and (correlation_record['x_loc'] not in found_x_query_node_edge)):
found_x_query_node_edge.append(correlation_record['x_loc'])
main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'], np.abs(correlation_record['correlation_value'])))
if((correlation_record['y_loc'] in y_edge_overlap) and (correlation_record['y_loc'] not in found_y_query_node_edge)):
found_y_query_node_edge.append(correlation_record['y_loc'])
main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'], np.abs(correlation_record['correlation_value'])))
#===============================
# CUTOFF was too stringent.
# no edges produced. Recompute
#===============================
#if(len(data['corr']) < 1):
if(len(main_data_tuples) < 1):
for correlation_record in result['correlation_matrix']:
correlation_value = correlation_record['correlation_value']
if((abs(correlation_value) >= 0.5)):
main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'], np.abs(correlation_record['correlation_value'])))
if(correlation_value < 0):
edge_list_negative_values.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type']))
#subG = nx.Graph()
#subG.add_nodes_from()
#subG.add_weighted_edges_from(main_data_tuples)
#subG_minimum = nx.minimum_spanning_tree(subG)
#subG_nodes = subG.nodes()
#mainG = nx.Graph()
#mainG.add_weighted_edges_from(all_data_tuples)
#main_subG = mainG.subgraph(subG_nodes)
#for edges_subG in subG_minimum.edges(data=True):
# subG_nodes_data_tuples.append((edges_subG[0], edges_subG[1], edges_subG[2]['weight']))
result = generate_graph_from_tab_file(main_data_tuples, edge_list_negative_values, gene_list_array, elasticId, 'clustering_coefficient')
#result = generate_graph_from_tab_file(subG_nodes_data_tuples, edge_list_negative_values, gene_list_array, elasticId, 'community')
#================================
# INSERT QUERY NODE IF NOT FOUND
# IN RESULTING NETWORK
#================================
for xy_item in query_list_found:
foundItem = False
for node in result['heat_map']['nodes']:
if(node['id'] == xy_item):
foundItem = True
break
if(not foundItem):
result['heat_map']['nodes'].append(
{
'bfrac': 207,
'degree': 0,
'gfrac': 120,
'rfrac': 0,
'com': 999,
'id': xy_item
}
)
return result['heat_map']
else:
result = {
'directed':False,
'nodes':[{'bfrac':40.800000000000004,'degree':138,'gfrac':0,'rfrac':255,'com':0,'id':'WNT2B_v'},{'bfrac':191.25,'degree':2,'gfrac':0,'rfrac':255,'com':1,'id':'SLC6A6'}],
'links':[{'source':0,'target':1,'weight':1}]
}
return result
print("Got %d Hits:" % results['hits']['total'])
if(results['hits']['total'] < 1):
print 'no results'
#return return_value
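# Shared edge-filtering helper: keeps edges above the cut-off up to 'number_of_edges', then adds
# roughly ten extra edges per query-gene node (unlimited when for_heat_prop_use is True), and
# returns the kept tuples, the negative-correlation edge pairs and the full edge list.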
def filter_edges_to_tuples(result, calculated_cut_off, x_edge_overlap, y_edge_overlap, number_of_edges, for_heat_prop_use=False):
main_data_tuples = []
all_data_tuples = []
query_nodes_data_tuples = []
edge_counter = 0
non_core_counts = {}
edge_list_negative_values = []
#calculated_cut_off = 0.5
for correlation_record in result['correlation_matrix']:
correlation_value = correlation_record['correlation_value']
all_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'], np.abs(correlation_record['correlation_value'])))
if( (abs(correlation_value) >= calculated_cut_off) and edge_counter < number_of_edges):
main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'], np.abs(correlation_record['correlation_value'])))
edge_counter += 1
#print 'Core: ' + str(edge_counter) + ' out of ' + str(number_of_edges)
if(correlation_value < 0):
edge_list_negative_values.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type']))
else:
if((correlation_record['x_loc'] in x_edge_overlap) or (correlation_record['y_loc'] in y_edge_overlap)):
max_reached = False
if(correlation_record['x_loc'] in non_core_counts):
non_core_counts[correlation_record['x_loc']] += 1
if((non_core_counts[correlation_record['x_loc']] > 10) and not for_heat_prop_use):
max_reached = True
elif(correlation_record['y_loc'] in non_core_counts):
non_core_counts[correlation_record['y_loc']] += 1
if(non_core_counts[correlation_record['y_loc']] > 10 and not for_heat_prop_use):
max_reached = True
elif((correlation_record['x_loc'] in x_edge_overlap)):
non_core_counts[correlation_record['x_loc']] = 1
elif((correlation_record['y_loc'] in y_edge_overlap)):
non_core_counts[correlation_record['y_loc']] = 1
#print 'Non-core: ' + str(edge_counter)
if(not max_reached):
edge_counter += 1
main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'], np.abs(correlation_record['correlation_value'])))
query_nodes_data_tuples.append(correlation_record)
if(correlation_value < 0):
edge_list_negative_values.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type']))
return_obj = {
'main_data_tuples': main_data_tuples,
'edge_list_negative_values': edge_list_negative_values,
'all_data_tuples': all_data_tuples
}
#print dumps(non_core_counts)
return return_obj
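# Sketch of the structure returned above (illustration, not from the original source):
#   {'main_data_tuples': [('BRCA1_g', 'MIR21_m', 0.83), ...],    # edges kept for rendering
#    'all_data_tuples': [...],                                   # every edge, ignoring the cut-off
#    'edge_list_negative_values': [('BRCA1_g', 'MIR21_m'), ...]} # pairs whose raw correlation was < 0
# Edges enter 'main_data_tuples' when they meet the cut-off (while fewer than 'number_of_edges'
# have been kept) or when they touch a query node, the latter capped at roughly 10 per node
# unless 'for_heat_prop_use' is True.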
def get_heatmap_graph_from_es_by_id_using_neighbors(elasticId, gene_list, number_of_edges = 200):
client = pymongo.MongoClient()
db = client.cache
gene_list_array = gene_list.split(',')
query_list_found = []
x_edge_overlap = []
y_edge_overlap = []
cluster_annotations = []
heat_map_graph = db.heat_map_graph
heat_map_found = heat_map_graph.find_one({'clusterId': elasticId})
if(heat_map_found is not None):
return heat_map_found['heat_map']
else:
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'must': [
{'match': {
'_id': elasticId
}}
]
}
},
'size': 1
}
results = es.search(
index = 'clusters',
# doc_type = search_type,
body = search_body
)
if(len(results['hits']['hits']) > 0):
result = results['hits']['hits'][0]['_source']
cluster_annotations = result['hypergeometric_scores']
if(len(result['correlation_matrix']) < 1):
resultx = {
'directed':False,
'nodes':[{'bfrac':40.800000000000004,'degree':138,'gfrac':0,'rfrac':255,'com':0,'id':'WNT2B_v'},{'bfrac':191.25,'degree':2,'gfrac':0,'rfrac':255,'com':1,'id':'SLC6A6'}],
'links':[{'source':0,'target':1,'weight':1}],
'annotations': []
}
return resultx
total_edges = len(result['correlation_matrix'])
calculated_cut_off = get_top_200_weight(result['correlation_matrix'], number_of_edges)
print calculated_cut_off
#=================================
# GET QUERY LIST OVERLAP
#=================================
for gene_list_item in gene_list_array:
if(gene_list_item == 'WEE1'):
my_test_str = ''
x_node_index = findIndexByKeyValue(result['x_node_list'], 'name', gene_list_item)
y_node_index = findIndexByKeyValue(result['y_node_list'], 'name', gene_list_item)
if(x_node_index >= 0):
query_list_found.append(gene_list_item)
else:
if(y_node_index >= 0):
query_list_found.append(gene_list_item)
x_edge_overlap.append(x_node_index)
#===================================
# GET QUERY LIST OVERLAP FOR EDGES
# i.e. EDGE INDEX from:0 to:1
#===================================
if(x_node_index >= 0):
x_edge_overlap.append(x_node_index)
if(y_node_index >= 0):
y_edge_overlap.append(y_node_index)
#===========================
# GENERATE NODES EXPRESSION
# DICTIONARY
#===========================
node_expression_dict = {}
expression_value_list = []
for x_node_item in result['x_node_list']:
node_expression_dict[x_node_item['name']] = x_node_item['value']
expression_value_list.append(x_node_item['value'])
for y_node_item in result['y_node_list']:
if(y_node_item['name'] not in node_expression_dict):
node_expression_dict[y_node_item['name']] = y_node_item['value']
expression_value_list.append(y_node_item['value'])
expression_max = max(expression_value_list)
expression_min = min(expression_value_list)
#===================================
# GENERATE NODES & EDGES DATAFRAME
# BASED ON EDGE CUT_OFF
#===================================
filtered_graph_data = filter_edges_to_tuples(result, calculated_cut_off, x_edge_overlap, y_edge_overlap, number_of_edges, for_heat_prop_use=True)
main_data_tuples = filtered_graph_data['main_data_tuples']#[]
all_data_tuples = filtered_graph_data['all_data_tuples']#[]
query_nodes_data_tuples = []
edge_counter = 0
non_core_counts = {}
edge_list_negative_values = filtered_graph_data['edge_list_negative_values']#[]
# for correlation_record in result['correlation_matrix']:
# correlation_value = correlation_record['correlation_value']
# all_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'], np.abs(correlation_record['correlation_value'])))
# if( (abs(correlation_value) >= calculated_cut_off) and edge_counter < number_of_edges):
# main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'], np.abs(correlation_record['correlation_value'])))
# edge_counter += 1
# #print 'Core: ' + str(edge_counter)
# if(correlation_value < 0):
# edge_list_negative_values.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type']))
# else:
# if((correlation_record['x_loc'] in x_edge_overlap) or (correlation_record['y_loc'] in y_edge_overlap)):
# max_reached = False
# if(correlation_record['x_loc'] in non_core_counts):
# non_core_counts[correlation_record['x_loc']] += 1
# if(non_core_counts[correlation_record['x_loc']] > 10):
# max_reached = True
# elif(correlation_record['y_loc'] in non_core_counts):
# non_core_counts[correlation_record['y_loc']] += 1
# if(non_core_counts[correlation_record['y_loc']] > 10):
# max_reached = True
# elif((correlation_record['x_loc'] in x_edge_overlap)):
# non_core_counts[correlation_record['x_loc']] = 1
# elif((correlation_record['y_loc'] in y_edge_overlap)):
# non_core_counts[correlation_record['y_loc']] = 1
#print 'Non-core: ' + str(edge_counter)
# if(not max_reached):
# edge_counter += 1
# main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'], np.abs(correlation_record['correlation_value'])))
## query_nodes_data_tuples.append(correlation_record)
# if(correlation_value < 0):
# edge_list_negative_values.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type']))
#print dumps(non_core_counts)
print 'main_data_tuples: ' + str(len(main_data_tuples))
# for correlation_record in query_nodes_data_tuples:
# correlation_value = correlation_record['correlation_value']
# if((correlation_record['x_loc'] in x_edge_overlap) and (correlation_record['x_loc'] not in found_x_query_node_edge)):
# found_x_query_node_edge.append(correlation_record['x_loc'])
# main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'], result['y_node_list'][correlation_record['y_loc']]['name'], np.abs(correlation_record['correlation_value'])))#
# if((correlation_record['y_loc'] in y_edge_overlap) and (correlation_record['y_loc'] not in found_y_query_node_edge)):
# found_y_query_node_edge.append(correlation_record['y_loc'])
# main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'], result['y_node_list'][correlation_record['y_loc']]['name'], np.abs(correlation_record['correlation_value'])))
#===============================
# Cut-off was too stringent:
# no edges were produced, so recompute
#===============================
if(len(main_data_tuples) < 1):
for correlation_record in result['correlation_matrix']:
correlation_value = correlation_record['correlation_value']
if((abs(correlation_value) >= 0.5)):
main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type'], np.abs(correlation_record['correlation_value'])))
if(correlation_value < 0):
edge_list_negative_values.append((result['x_node_list'][correlation_record['x_loc']]['name'] + '_' + result['x_node_list_type'], result['y_node_list'][correlation_record['y_loc']]['name'] + '_' + result['y_node_list_type']))
mainG = nx.Graph()
mainG.add_weighted_edges_from(all_data_tuples)
adjacency_matrix = nx.adjacency_matrix(mainG)
for neighbor_edges in mainG.edges_iter(gene_list_array, data=True):
main_data_tuples.append((neighbor_edges[0],neighbor_edges[1],neighbor_edges[2]['weight']))
result = generate_graph_from_tab_file(main_data_tuples, edge_list_negative_values, gene_list_array, elasticId, 'clustering_coefficient')
result['heat_map']['filter_message'] = 'Showing top %d edges out of %d' % (len(result['heat_map']['links']), total_edges)
result['heat_map']['annotations'] = cluster_annotations
#================================
# INSERT QUERY NODE IF NOT FOUND
# IN RESULTING NETWORK
#================================
for xy_item in query_list_found:
foundItem = False
for node in result['heat_map']['nodes']:
if(node['id'].replace('_g', '').replace('_v', '').replace('_m', '') == xy_item):
foundItem = True
break
if(not foundItem):
result['heat_map']['nodes'].append(
{
'bfrac': 207,
'degree': 0,
'gfrac': 120,
'rfrac': 0,
'com': 999,
'id': xy_item
}
)
#================================
# ADD EXPRESSION VALUE TO THE
# NODES (Normalize)
#================================
for node in result['heat_map']['nodes']:
if(node['id'].replace('_g', '').replace('_v', '').replace('_m', '') in node_expression_dict):
if(expression_max != expression_min):
node['expression_value'] = (node_expression_dict[node['id'].replace('_g', '').replace('_v', '').replace('_m', '')] - expression_min)/ (expression_max - expression_min)
else:
node['expression_value'] = 1.0
return result['heat_map']
else:
result = {
'directed':False,
'nodes':[{'bfrac':40.800000000000004,'degree':138,'gfrac':0,'rfrac':255,'com':0,'id':'WNT2B_v'},{'bfrac':191.25,'degree':2,'gfrac':0,'rfrac':255,'com':1,'id':'SLC6A6'}],
'links':[{'source':0,'target':1,'weight':1}],
'annotations':[]
}
return result
print("Got %d Hits:" % results['hits']['total'])
if(results['hits']['total'] < 1):
print 'no results'
#return return_value
def get_heatmap_graph_from_es_by_id_no_processing(elasticId, gene_list):
client = pymongo.MongoClient()
db = client.cache
heat_map_graph = db.heat_map_graph
heat_map_found = heat_map_graph.find_one({'clusterId': elasticId})
if(heat_map_found is not None):
return heat_map_found['heat_map']
else:
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'must': [
{'match': {
'_id': elasticId
}}
]
}
},
'size': 1
}
results = es.search(
index = 'clusters',
# doc_type = search_type,
body = search_body
)
if(len(results['hits']['hits']) > 0):
result = results['hits']['hits'][0]['_source']
#if(result['correlation_matrix_degree'] > 300000):
if(len(result['correlation_matrix']) < 1):
resultx = {
'directed':False,
'nodes':[{'bfrac':40.800000000000004,'degree':138,'gfrac':0,'rfrac':255,'com':0,'id':'WNT2B_v'},{'bfrac':191.25,'degree':2,'gfrac':0,'rfrac':255,'com':1,'id':'SLC6A6'}],
'links':[{'source':0,'target':1,'weight':1}]
}
return resultx
#===================================
# GENERATE NODES & EDGES DATAFRAME
# BASED ON EDGE CUT_OFF
#===================================
main_data_tuples = []
edge_list_negative_values = []
for correlation_record in result['correlation_matrix']:
correlation_value = correlation_record['correlation_value']
main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'], result['y_node_list'][correlation_record['y_loc']]['name'], correlation_record['correlation_value']))
Gsmall = nx.Graph()
Gsmall.add_weighted_edges_from(main_data_tuples)
cluster_edges = Gsmall.edges(data=True)
cluster_nodes = Gsmall.nodes()
cluster_edges_array = []
cluster_data_frame = 'source\t target\t weight\n'
for edge in cluster_edges:
cluster_edges_array.append({'source': edge[0], 'target': edge[1], 'weight': edge[2]['weight']})
#cluster_data_frame += '%s\t%s\t%s\n' % (edge[0],edge[1],str(edge[2]['weight']))
#return {'node_count': len(cluster_nodes), 'edge_count': len(cluster_edges_array), 'nodes': cluster_nodes, 'edges': cluster_edges_array}
return {'edges': cluster_edges_array}
else:
result = {
'directed':False,
'nodes':[{'bfrac':40.800000000000004,'degree':138,'gfrac':0,'rfrac':255,'com':0,'id':'WNT2B_v'},{'bfrac':191.25,'degree':2,'gfrac':0,'rfrac':255,'com':1,'id':'SLC6A6'}],
'links':[{'source':0,'target':1,'weight':1}]
}
return result
print("Got %d Hits:" % results['hits']['total'])
if(results['hits']['total'] < 1):
print 'no results'
#return return_value
def convert_cluster_to_cx_es_by_id(elasticId):
print elasticId
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'must': [
{'match': {
'_id': elasticId
}}
]
}
},
'size': 1
}
results = es.search(
index = 'clusters',
body = search_body
)
if(len(results['hits']['hits']) > 0):
result = results['hits']['hits'][0]['_source']
if(len(result['correlation_matrix']) < 1):
resultx = {
'directed':False,
'nodes':[],
'links':[]
}
return resultx
#===================================
# GENERATE NODES & EDGES DATAFRAME
# BASED ON EDGE CUT_OFF
#===================================
main_data_tuples = []
edge_list_negative_values = []
for correlation_record in result['correlation_matrix']:
correlation_value = correlation_record['correlation_value']
main_data_tuples.append((result['x_node_list'][correlation_record['x_loc']]['name'], result['y_node_list'][correlation_record['y_loc']]['name'], correlation_record['correlation_value']))
Gsmall = nx.Graph()
Gsmall.add_weighted_edges_from(main_data_tuples)
export_edges = Gsmall.edges()
export_nodes = Gsmall.nodes()
#ndex_gsmall = NdexGraph(networkx_G=Gsmall)
ndex_gsmall = NdexGraph()
ndex_nodes_dict = {}
for export_node in export_nodes:
ndex_nodes_dict[export_node] = ndex_gsmall.add_new_node(export_node)
for export_edge in export_edges:
ndex_gsmall.add_edge_between(ndex_nodes_dict[export_edge[0]],ndex_nodes_dict[export_edge[1]])
#print export_edge[0] + ' ' + export_edge[1]
#print str(ndex_nodes_dict[export_edge[0]]) + ' ' + str(ndex_nodes_dict[export_edge[1]])
ndex_gsmall.write_to('../../cx/' + elasticId + '_manual.cx')
#cx_from_networkn = ndex_gsmall.to_cx()
#print dumps(cx_from_networkn)
cluster_edges = Gsmall.edges(data=True)
cluster_nodes = Gsmall.nodes()
cluster_edges_array = []
cluster_data_frame = 'source\t target\t weight\n'
for edge in cluster_edges:
cluster_edges_array.append({'source': edge[0], 'target': edge[1], 'weight': edge[2]['weight']})
#cluster_data_frame += '%s\t%s\t%s\n' % (edge[0],edge[1],str(edge[2]['weight']))
#return {'node_count': len(cluster_nodes), 'edge_count': len(cluster_edges_array), 'nodes': cluster_nodes, 'edges': cluster_edges_array}
return {'edges': cluster_edges_array}
else:
result = {
'directed':False,
'nodes':[{'bfrac':40.800000000000004,'degree':138,'gfrac':0,'rfrac':255,'com':0,'id':'WNT2B_v'},{'bfrac':191.25,'degree':2,'gfrac':0,'rfrac':255,'com':1,'id':'SLC6A6'}],
'links':[{'source':0,'target':1,'weight':1}]
}
return result
print("Got %d Hits:" % results['hits']['total'])
if(results['hits']['total'] < 1):
print 'no results'
#return return_value
def findIndexByKeyValue(obj, key, value):
mystr = ''
for i in range(0,len(obj)):
if (obj[i][key] == value):
return i
return -1
def get_top_200_weight(cor_matrix, number_of_edges):
cor_values_array = [];
for correlation_record in cor_matrix:
cor_values_array.append(correlation_record['correlation_value'])
cor_values_array_sorted = sorted(cor_values_array, reverse=True)[:number_of_edges]
return cor_values_array_sorted[-1]
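# Worked example (not part of the original module): with number_of_edges = 3 and a matrix whose
# correlation values are [0.9, 0.2, 0.7, 0.5], the sorted top-3 list is [0.9, 0.7, 0.5], so the
# function returns 0.5 and that value becomes 'calculated_cut_off' in the callers.
# Note that the signed values are sorted, so strongly negative correlations never raise the cut-off.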
def generate_filtered_matrix(elasticId, gene_list, number_of_edges):
client = pymongo.MongoClient()
db = client.cache
gene_list_array = gene_list.split(',')
query_list_found = []
return_array_string = '[]'
x_edge_overlap = []
y_edge_overlap = []
heat_map_graph = db.heat_map_graph
heat_map_found = heat_map_graph.find_one({'clusterId': elasticId})
if(heat_map_found is not None):
return heat_map_found['heat_map']
else:
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'must': [
{'match': {
'_id': elasticId
}}
]
}
},
'size': 1
}
results = es.search(
index = 'clusters',
body = search_body
)
if(len(results['hits']['hits']) > 0):
result = results['hits']['hits'][0]['_source']
x_axis_type = '_' + result['x_node_list_type']
y_axis_type = '_' + result['y_node_list_type']
x_axis_index = []
y_axis_index = []
if(len(result['correlation_matrix']) < 1):
resultx = {
'directed':False,
'nodes':[{'bfrac':40.800000000000004,'degree':138,'gfrac':0,'rfrac':255,'com':0,'id':'WNT2B_v'},{'bfrac':191.25,'degree':2,'gfrac':0,'rfrac':255,'com':1,'id':'SLC6A6'}],
'links':[{'source':0,'target':1,'weight':1}]
}
return resultx
calculated_cut_off = get_top_200_weight(result['correlation_matrix'], number_of_edges)
print calculated_cut_off
#=================================
# GET QUERY LIST OVERLAP
#=================================
for gene_list_item in gene_list_array:
if(gene_list_item == 'WEE1'):
my_test_str = ''
x_node_index = findIndexByKeyValue(result['x_node_list'], 'name', gene_list_item)
y_node_index = findIndexByKeyValue(result['y_node_list'], 'name', gene_list_item)
if(x_node_index >= 0):
query_list_found.append(gene_list_item)
else:
if(y_node_index >= 0):
query_list_found.append(gene_list_item)
x_edge_overlap.append(x_node_index)
#===================================
# GET QUERY LIST OVERLAP FOR EDGES
# i.e. EDGE INDEX from:0 to:1
#===================================
if(x_node_index >= 0):
x_edge_overlap.append(x_node_index)
if(y_node_index >= 0):
y_edge_overlap.append(y_node_index)
#===================================
# GENERATE NODES & EDGES DATAFRAME
# BASED ON EDGE CUT_OFF
#===================================
filtered_graph_data = filter_edges_to_tuples(result, calculated_cut_off, x_edge_overlap, y_edge_overlap, number_of_edges)
main_data_tuples = filtered_graph_data['main_data_tuples']#[]
all_data_tuples = filtered_graph_data['all_data_tuples']#[]
query_nodes_data_tuples = []
edge_counter = 0
non_core_counts = {}
edge_list_negative_values = filtered_graph_data['edge_list_negative_values']#[]
Gsmall = nx.Graph()
Gsmall.add_weighted_edges_from(main_data_tuples)
nodes = Gsmall.nodes()
#=======================================================================================
# Because the adjacency matrix combines both axes we need to do some post-filtering
# to limit the output to the respective node type. This only applies to non-symmetrical
# clusters, but works for symmetrical types as well.
# Determine which rows/cols have nodes for their respective axis
# i.e. if the y axis is type '_v' then we want a list of all the indexes with
# nodes of type '_v' so we can remove them from the x axis
#=======================================================================================
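# Illustration (assumed node naming, not from the source): with x_axis_type == '_g' and
# y_axis_type == '_m', a node ordering of ['TP53_g', 'MIR21_m', 'EGFR_g'] produces
# x_axis_index = [0, 2] and y_axis_index = [1]; only rows whose index is in x_axis_index and
# columns whose index is in y_axis_index survive the string-building loop below.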
node_index = 0
xValues = []
yValues = []
for node in nodes:
if(node.endswith(x_axis_type)):
x_axis_index.append(node_index)
xValues.append(node)
if(node.endswith(y_axis_type)):
yValues.append(node)
y_axis_index.append(node_index)
node_index += 1
edges=Gsmall.edges(data=True)
myMatrix = nx.adjacency_matrix(Gsmall, nodes, weight='weight')
raw_matrix_array = np.array(myMatrix.todense())
clustered_matrix = HeatMaps.cluster_heat_map_2D({'zValues': raw_matrix_array, 'xValues': xValues, 'yValues': yValues})#cluster_heat_map({'zValues': raw_matrix_array, 'xValues': xValues, 'yValues': yValues})
matrix_array = clustered_matrix['zValues']
#matrix_array = raw_matrix_array
return_array_string = '['
row_index = 0
for row in matrix_array:
col_index = 0
if(row_index in x_axis_index):
return_array_string += '['
for col in row:
if(col_index in y_axis_index):
return_array_string += str(col) + ','
col_index += 1
if(return_array_string[-1] == ','):
return_array_string = return_array_string[:-1]
return_array_string += '],'
row_index += 1
#print return_array_string[-1]
if(return_array_string[-1] == ','):
return_array_string = return_array_string[:-1]
return_array_string += ']'
# zz = np.matrix([[str(ele) for ele in a] for a in np.array(myMatrix.todense())])
#return_string = '{"zValues": ' + return_array_string + ', "yValues": ' + dumps(clustered_matrix['xValues']) + ', "xValues": ' + dumps(clustered_matrix['yValues']) + '}'
return_string = '{"zValues": ' + return_array_string + ', "yValues": ' + dumps(clustered_matrix['xValues']) + ', "xValues": ' + dumps(clustered_matrix['yValues']) + '}'
return return_string
def generate_graph_from_tab_file(edge_list_prerendered, edge_list_negative_values, query_genes, cluster_id, color_type='betweenness_centrality', colormap='spring_r'):
'''
Build a node-link JSON graph from the pre-rendered weighted edge list 'edge_list_prerendered'.
Nodes are coloured according to 'color_type' (community, clustering_coefficient or
betweenness_centrality) using the matplotlib colormap named by 'colormap'; edges listed in
'edge_list_negative_values' get their weights re-signed as negative in the exported links.
Returns {'clusterId': ..., 'heat_map': {'directed': False, 'nodes': [...], 'links': [...]}}.
'''
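# Expected input shape (illustrative, assumed from the call sites above):
#   edge_list_prerendered     -> [('TP53_g', 'MDM2_g', 0.82), ('TP53_g', 'MIR34A_m', 0.41), ...]
#   edge_list_negative_values -> [('TP53_g', 'MIR34A_m'), ...]  # pairs whose raw correlation < 0
#   query_genes               -> ['TP53', 'MDM2']
# The weights arrive as absolute values; the negative pairs are used near the end of this
# function to flip the sign back on the exported links.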
# first load in a network (edge list)
#edge_list_df = cluster_IO #pd.read_csv(cluster_IO,sep='\t')
group_ids = 'X' #np.unique(edge_list_df['group_id'])
for focal_group in group_ids:
#print focal_group
#save_file_name = out_file_start + '_' + str(int(focal_group)) + '.json'
#idx_group = (edge_list_df['group_id']==focal_group)
#idx_group = list(edge_list_df['group_id'][idx_group].index)
# make a network out of it
#edge_list = [(edge_list_df['var1'][i], edge_list_df['var2'][i], np.abs(edge_list_df['corr'][i])) for i in idx_group if edge_list_df['corr'][i] !=0]
Gsmall = nx.Graph()
Gsmall.add_weighted_edges_from(edge_list_prerendered)
permaGsmall = Gsmall
#Gsmall.add_weighted_edges_from(edge_list)
#outdeg = Gsmall.degree()
outdeg = Gsmall.degree()
edges_count = Gsmall.size()
# to_remove = [n for n in outdeg if outdeg[n] == 1]
# to_remove_count = len(to_remove)
# if((edges_count - to_remove_count) >= 5):
# Gsmall.remove_nodes_from(to_remove)
# permaGsmall = Gsmall
# outdeg = Gsmall.degree()
# to_remove = [n for n in outdeg if outdeg[n] == 0]
# Gsmall.remove_nodes_from(to_remove)
# if(Gsmall.size() < 5):
# Gsmall = permaGsmall
nodes = Gsmall.nodes()
numnodes = len(nodes)
edges=Gsmall.edges(data=True)
numedges = len(edges)
data = []
# for correlation_record in result['correlation_matrix']:
# correlation_value = correlation_record['correlation_value']
# if( (abs(correlation_value) >= calculated_cut_off) and edge_counter < 2000):
# data.append([correlation_record['x_loc'], correlation_record['y_loc'], correlation_value])#abs(correlation_value)])
# array = np.zeros((len(result['x_node_list']), len(result['y_node_list'])))
# for row, col, val in data:
# index = (row, col)
# array[index] = val
# return_array_string = '['
# for row in array:
# return_array_string += '['
# for col in row:
# return_array_string += str(col) + ','
# return_array_string += '],'
# return_array_string += ']'
# pre-calculate the node positions (multiply by 1000 below... may need to change this)
# node_pos = nx.spring_layout(Gsmall, k=0.2, iterations=5)
print 'Edges len: ' + str(len(edges))
if color_type=='community':
partition = community.best_partition(Gsmall)
partition = pd.Series(partition)
col_temp = partition[Gsmall.nodes()]
# Set up json for saving
# what should the colors be??
num_communities = len(np.unique(col_temp))
color_list = plt.cm.gist_rainbow(np.linspace(0, 1, num_communities))
# blend the community colors (so that to-nodes are a mixture of all the communities they belong to)
rfrac,gfrac,bfrac=calc_community_fraction(Gsmall,Gsmall.nodes(),Gsmall.nodes(),partition,color_list)
pos_x_y = {}
communities_counter = []
for debug_node in nodes:
r_color = int(rfrac[debug_node]*255) + randint(0,19)
r_color = 10 if r_color < 5 else r_color
g_color = int(gfrac[debug_node]*255) + randint(0,19)
g_color = 10 if g_color < 5 else g_color
b_color = int(bfrac[debug_node]*255) + randint(0,19)
b_color = 10 if b_color < 5 else b_color
pos_x_y[debug_node] = (r_color, b_color)
#print str(r_color) + ', ' + str(b_color)
community_color_concat = str(rfrac[debug_node]) + str(gfrac[debug_node]) + str(bfrac[debug_node])
if(community_color_concat not in communities_counter):
communities_counter.append(community_color_concat)
# pre-calculate the node positions (multiply by 1000 below... may need to change this)
if(len(communities_counter) >= 3):
node_pos = nx.spring_layout(Gsmall, k=0.02, pos=pos_x_y, iterations=5)
for node_pos_item in node_pos:
print node_pos_item
else:
node_pos = nx.spring_layout(Gsmall, k=0.02, iterations=5)
nodes_dict = [{"id":n,"com":col_temp[n],"degree":Gsmall.degree(n),
"rfrac":int(rfrac[n]*255),"gfrac":int(gfrac[n]*255),"bfrac":int(bfrac[n]*255),
"x":node_pos[n][0]*1000,
"y":node_pos[n][1]*1000} for n in nodes]
elif color_type=='expression_value':
mystr = ''
elif color_type=='clustering_coefficient':
# pre-calculate the node positions (multiply by 1000 below... may need to change this)
#node_pos = nx.spring_layout(Gsmall, k=0.04)
#node_pos = nx.spring_layout(Gsmall, k=0.04)
cmap= plt.get_cmap(colormap)
rfrac,gfrac,bfrac,local_cc=calc_clustering_coefficient(Gsmall,cmap)
nodes_dict = [{"id":n,"com":0,"degree":Gsmall.degree(n),
"rfrac":rfrac[n]*255,"gfrac":gfrac[n]*255,"bfrac":bfrac[n]*255,
"local_cc": local_cc[n],
#"x":node_pos[n][0]*1000,
#"y":node_pos[n][1]*1000
} for n in nodes]
elif color_type=='betweenness_centrality':
# pre-calculate the node positions (multiply by 1000 below... may need to change this)
node_pos = nx.spring_layout(Gsmall, k=0.2, iterations=5)
cmap= plt.get_cmap(colormap)
rfrac,gfrac,bfrac=calc_betweenness_centrality(Gsmall,cmap)
nodes_dict = [{"id":n,"com":0,"degree":Gsmall.degree(n),
"rfrac":rfrac[n]*255,"gfrac":gfrac[n]*255,"bfrac":bfrac[n]*255,
"x":node_pos[n][0]*1000,
"y":node_pos[n][1]*1000} for n in nodes]
node_map = dict(zip(nodes,range(numnodes))) # map to indices for source/target in edges
edges_dict = [{"source":node_map[edges[i][0]], "target":node_map[edges[i][1]], "weight":edges[i][2]['weight']} for i in range(numedges)]
#==========================================
# Restore the correlation sign (+/-)
#
# create a list of edges that should be
# negative using [source, target] format
#==========================================
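# For example (illustration only): if node_map == {'TP53_g': 0, 'MIR34A_m': 3} and the pair
# ('TP53_g', 'MIR34A_m') appears in edge_list_negative_values, the adjusted pair (0, 3) matches
# the link {'source': 0, 'target': 3} in either orientation and its weight becomes -|weight|.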
edge_list_negative_values_adjusted = [(node_map[neg_edge_item[0]], node_map[neg_edge_item[1]]) for neg_edge_item in edge_list_negative_values if((neg_edge_item[0] in node_map) and (neg_edge_item[1] in node_map))]
for edge_dict_item in edges_dict: #AVMGJVnyRXVvO0gLpdDP
for e_neg in edge_list_negative_values_adjusted:
if(e_neg[0] == edge_dict_item['source'] and e_neg[1] == edge_dict_item['target']) or (e_neg[1] == edge_dict_item['source'] and e_neg[0] == edge_dict_item['target']):
edge_dict_item['weight'] = -1 * np.abs(edge_dict_item['weight'])
import json
json_graph = {"directed": False, "nodes": nodes_dict, "links":edges_dict}
print 'Edge count: ' + str(len(edges_dict))
#client = pymongo.MongoClient()
#db = client.cache
#heat_maps = db.heat_map_graph
a = {
'clusterId': focal_group,
'heat_map': json_graph
}
return a
def calc_clustering_coefficient(G,cmap):
# this function calculates the clustering coefficient of each node, and returns colors corresponding to these values
local_CC = nx.clustering(G,G.nodes())
local_CC_scale = [round(local_CC[key]*float(255)) for key in G.nodes()]
local_CC_scale = pd.Series(local_CC_scale,index=G.nodes())
rfrac = [cmap(int(x))[2] for x in local_CC_scale]
gfrac = [cmap(int(x))[1] for x in local_CC_scale]
bfrac = [cmap(int(x))[0] for x in local_CC_scale]
rfrac = pd.Series(rfrac,index=G.nodes())
gfrac = pd.Series(gfrac,index=G.nodes())
bfrac = pd.Series(bfrac,index=G.nodes())
return rfrac,gfrac,bfrac,local_CC
def calc_betweenness_centrality(G,cmap):
# this function calculates the betweenness centrality of each node, and returns colors corresponding to these values
local_BC = nx.betweenness_centrality(G)
local_BC_scale = [round(local_BC[key]*float(255)) for key in G.nodes()]
local_BC_scale = pd.Series(local_BC_scale,index=G.nodes())
rfrac = [cmap(int(x))[0] for x in local_BC_scale]
gfrac = [cmap(int(x))[1] for x in local_BC_scale]
bfrac = [cmap(int(x))[2] for x in local_BC_scale]
rfrac = pd.Series(rfrac,index=G.nodes())
gfrac = pd.Series(gfrac,index=G.nodes())
bfrac = pd.Series(bfrac,index=G.nodes())
return rfrac,gfrac,bfrac
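# Colouring sketch (descriptive note, wording not from the source): each node's betweenness
# centrality is scaled to an integer in [0, 255] and looked up in the matplotlib colormap, whose
# RGBA output lies in [0, 1]; the caller multiplies the returned r/g/b fractions by 255 to get
# 8-bit channel values for the exported node dictionaries.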
# this function calculates fraction of to-node connections belonging to each community
def calc_community_fraction(G,to_nodes,from_nodes, from_nodes_partition, color_list):
# set color to most populous community
degree = G.degree(to_nodes)
rfrac,gfrac,bfrac=pd.Series(index=G.nodes()),pd.Series(index=G.nodes()),pd.Series(index=G.nodes())
for t in to_nodes:
t_neighbors = G.neighbors(t)
t_comms = [from_nodes_partition[i] for i in t_neighbors]
t_comms = pd.Series(t_comms)
unique_comms = t_comms.unique()
num_unique_comms = len(unique_comms)
num_n = pd.Series(index=unique_comms)
for n in unique_comms:
num_n[n] = sum(t_comms==n)
# find max num_n
color_max = color_list[num_n.argmax()][0:3]
# how much is shared by other colors?
#print(num_n)
frac_shared = 1-np.max(num_n)/np.sum(num_n)
# darken the color by this amount
#color_dark = shade_color(color_max,-frac_shared*100)
color_dark = (color_max[0]*(1-frac_shared), color_max[1]*(1-frac_shared), color_max[2]*(1-frac_shared))
rfrac[t] = color_dark[0]
gfrac[t] = color_dark[1]
bfrac[t] = color_dark[2]
# fill in the from_nodes colors
for f in from_nodes:
f_group = from_nodes_partition[f]
rfrac[f] = color_list[f_group][0]
gfrac[f] = color_list[f_group][1]
bfrac[f] = color_list[f_group][2]
return rfrac,gfrac,bfrac
def get_all_cluster_ids():
search_body = {
'fields': ['_id'],
'query' : {
'match_all': {}
},
'size': 70000
}
result = es.search(
index = 'clusters',
doc_type = 'clusters_tcga_louvain',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
hit_ids = ''
hit_ids_array = []
for hit in result['hits']['hits']:
hit_ids += '"' + hit['_id'] + '",'
hit_ids_array.append(hit['_id'])
if(len(hit_ids) > 0):
hit_ids = hit_ids[:-1]
of = open('all_cluster_ids.json', 'w')
i = 0
for cluster_id in hit_ids_array:
of.write(cluster_id + '\n')
of.write('\n' * 6)
of.write(hit_ids + '\n')
of.close()
#print hit_ids_array
def get_remaining_thumbnails():
all_thumbs = [];
with open("thumbs_master_list.txt", "r") as f:
for line in f:
all_thumbs.append(line)
partial_thumbs = [];
with open("thumbs_partial_list.txt", "r") as f:
for line in f:
partial_thumbs.append(line)
#all_all = [set(all_thumbs), set(partial_thumbs)]
leftovers = list(set(all_thumbs) - set(partial_thumbs))
print len(leftovers)
of = open('thumbnails_leftovers.txt', 'w')
i = 0
id_array_string = ''
for cluster_id in leftovers:
id_array_string += '"' + cluster_id.rstrip() + '",'
of.write(id_array_string)
of.close()
def get_cluster_document_from_elastic_by_id2(elasticId):
xValues = []
yValues = []
search_body = {
'query': {
'bool': {
'must': [
{'match': {
'_id': elasticId
}}
]
}
}
}
results = es.search(
index = 'clusters',
body = search_body
)
if(len(results) > 0):
result = results['hits']['hits'][0]['_source']
for x_label in result['x_node_list']:
xValues.append(x_label['name'])
for y_label in result['y_node_list']:
yValues.append(y_label['name'])
data = []
for correlation_record in result['correlation_matrix']:
correlation_value = correlation_record['correlation_value']
data.append([correlation_record['x_loc'], correlation_record['y_loc'], correlation_value])
array = np.zeros((len(result['x_node_list']), len(result['y_node_list'])))
for row, col, val in data:
index = (row, col)
array[index] = val
#==========================================
# Because the z matrix is transposed we
# will switch the labels for X and Y coord.
#==========================================
return_value = {
'xValues': yValues,
'yValues': xValues,
'zValues': array
}
print("Got %d Hits:" % results['hits']['total'])
if(results['hits']['total'] < 1):
print 'no results'
return return_value
def get_cluster_document_from_elastic_by_id3(elasticId):
client = pymongo.MongoClient()
db = client.cache
heat_maps = db.heat_maps
heat_map_found = heat_maps.find_one({'elasticId': elasticId})
if(heat_map_found is not None):
print 'Get cached heatmap'
return heat_map_found['heat_map']
else:
es_data_matrix = get_cluster_document_from_elastic_by_id2(elasticId)
return dumps(es_data_matrix) #heat_map_ordered_transposed
def get_heatmap_export(esId):
cluster_result = get_cluster_document_from_elastic_by_id3('2020035052')
exportArray = []
xValues = cluster_result['xValues']
yValues = cluster_result['yValues']
zValues = cluster_result['zValues']
tempHeader = ','
for xVal in xValues:
tempHeader += xVal + ','
exportArray.append(tempHeader)
for i in range(len(yValues)):
tempRowString = yValues[i] + ','
for j in range(len(xValues)):
tempRowString += str(zValues[i][j]) + ','
exportArray.append(tempRowString)
# var exportArrayString = "data:text/csv;charset=utf-8,";
# for (var k = 0; k <= $scope.plotlyData.yValues.length; k++) {
# exportArrayString += $scope.plotlyData.exportArray[k] + '\n';
# }
def load_x_y_z_cluster_data(esId):
# results = get_cluster_document_from_elastic_by_id3('2020035052')
xValues = []
yValues = []
cluster_info = ''
search_body = {
'query': {
'bool': {
'must': [
{'match': {
'_id': esId
}}
]
}
}
}
results = es.search(
index = 'clusters',
body = search_body
)
if(len(results) > 0):
result = results['hits']['hits'][0]['_source']
cluster_info = result['network_full_name']
for x_label in result['x_node_list']:
xValues.append(x_label['name'])
for y_label in result['y_node_list']:
yValues.append(y_label['name'])
data = []
for correlation_record in result['correlation_matrix']:
correlation_value = correlation_record['correlation_value']
data.append([correlation_record['x_loc'], correlation_record['y_loc'], correlation_value])
array = np.zeros((len(result['x_node_list']), len(result['y_node_list'])))
for row, col, val in data:
index = (row, col)
array[index] = val
zValues = array;
sample_mat = pd.DataFrame(data=zValues, # values
index=xValues, # 1st column as index
columns=yValues) # 1st row as the column names
numrows = len(sample_mat)
numcols = len(sample_mat.columns)
# check if symmetric
if numrows==numcols:
idx_to_node = dict(zip(range(len(sample_mat)),list(sample_mat.index)))
sample_mat = np.array(sample_mat)
sample_mat = sample_mat[::-1,::-1] # reverse the indices for use in graph creation
else:
zmat = np.array(sample_mat)
zmat = zmat[::-1,0:-1] # reverse the indices for use in graph creation
ylist = list(sample_mat.index)
xlist = list(sample_mat.columns)
zsym,xsym,ysym = symmetrize_matrix(zmat,xlist,ylist)
sample_mat = zsym
idx_to_node = dict(zip(range(len(sample_mat)),xlist))
#G_cluster = nx.Graph()
G_cluster = nx.from_numpy_matrix(np.abs(sample_mat))
G_cluster = nx.relabel_nodes(G_cluster,idx_to_node)
return {'cluster': G_cluster, 'cluster_info': cluster_info}
def symmetrize_matrix(zmat,xlist,ylist):
'''
Simple helper function to symmetrize an asymmetric matrix
inputs:
- zmat: a 2d matrix (dimensions r x c)
- xlist: ordered list of row names (length r)
- ylist: ordered list of column names (length c)
outputs:
- zsym: symmetrized matrix (dimensions (r+c) x (r+c))
- xsym: [xlist, ylist]
- ysym: [xlist, ylist] (Note: ysym = xsym)
'''
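# Worked example (not part of the original docstring): for zmat = [[0.3, -0.7]],
# xlist = ['GENE_A'] and ylist = ['MIR_B', 'MIR_C'], the result is
#   zsym = [[ 1.0, 0.3, -0.7],
#           [ 0.3, 1.0,  0.0],
#           [-0.7, 0.0,  1.0]]
# with xsym == ysym == ['GENE_A', 'MIR_B', 'MIR_C'].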
numrows,numcols = zmat.shape
# initialize the symmetric version of zmat
zsym = np.zeros((numrows+numcols,numrows+numcols))
# fill diagonal with 1s
np.fill_diagonal(zsym,1)
zsym[0:numrows,numrows:]=zmat
zsym[numrows:,0:numrows]=np.transpose(zmat)
xsym = []
xsym.extend(xlist)
xsym.extend(ylist)
ysym = []
ysym.extend(xlist)
ysym.extend(ylist)
return zsym,xsym,ysym
def transform_matrix_to_graph():
my_matrix = [
[-0.37340000000000001, 0.0, -0.37459999999999999, -0.39660000000000001, -0.4123, -0.36890000000000001],
[0.0, 0.31709999999999999, 0.31340000000000001, 0.34210000000000002, 0.0, 0.32429999999999998],
[-0.32690000000000002, -0.40189999999999998, 0.50939999999999996, 0.54959999999999998, 0.57179999999999997, 0.57179999999999997],
[-0.31030000000000002, -0.41099999999999998, -0.38729999999999998, 0.0, -0.30990000000000001, 0.0],
[0.30869999999999997, 0.0, 0.30330000000000001, 0.0, 0.0, 0.0, 0.34000000000000002, 0.0, 0.34210000000000002],
[-0.43240000000000001, -0.3896, -0.39900000000000002, 0.74729999999999996, 0.85919999999999996, 0.78000000000000003]
]
# rescale every entry of the matrix
my_matrix = [[200 - d2 * 100 for d2 in d1] for d1 in my_matrix]
my_matrix2 = [
[1, 0, 1, 1, 0, 1],
[1, 0, 0, 1, 1, 0],
[1, 0, 1, 0, 1, 1],
[1, 0, 1, 1, 1, 0],
[1, 0, 1, 0, 0, 1],
[1, 0, 1, 1, 0, 0]
]
A = np.array(my_matrix2)
G = nx.DiGraph(A)
x_nodes = ['A','B','C','D','E','F',]
def plot_cluster():
G=nx.random_geometric_graph(200,0.125)
pos=nx.get_node_attributes(G,'pos')
dmin=1
ncenter=0
for n in pos:
x,y=pos[n]
d=(x-0.5)**2+(y-0.5)**2
if d<dmin:
ncenter=n
dmin=d
p=nx.single_source_shortest_path_length(G,ncenter)
print p
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/SearchViz.py",
"copies": "1",
"size": "87637",
"license": "mit",
"hash": 345687025269189400,
"line_mean": 39.5726851852,
"line_max": 412,
"alpha_frac": 0.5138354804,
"autogenerated": false,
"ratio": 3.6442531603459747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4658088640745975,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
from collections import Counter
from app import PubMed
from models.TermResolver import TermAnalyzer
from elasticsearch import Elasticsearch
es = Elasticsearch(['http://ec2-52-24-205-32.us-west-2.compute.amazonaws.com:9200/'],send_get_body_as='POST') # Clustered Server
#==================================
#==================================
# STAR SEARCH
#==================================
#==================================
def get_star_search_mapped(queryTerms):
network_information = {
'searchGroupTitle': 'Star Network',
'searchTab': 'GENES',
'network': 'node',
'matchField': 'node_list.node.name',
'matchCoreNode': 'node_name',
'cancerType': 'BRCA',
'queryTerms': queryTerms
}
star_network_data = star_search_mapped(network_information)
return [star_network_data]
def get_star_search_with_disease_mapped(queryTerms, disease):
network_information = {
'searchGroupTitle': 'Star Network',
'searchTab': 'GENES',
'network': 'node',
'matchField': 'node_list.node.name',
'matchCoreNode': 'node_name',
'cancerType': 'BRCA',
'queryTerms': queryTerms
}
print disease
star_network_data = star_search_mapped(network_information, disease)
return [star_network_data]
def star_search_mapped(network_info, disease=[]):
gene_network_data = {
'searchGroupTitle': network_info['searchGroupTitle'],
'clusterNodeName': "",
'searchTab': network_info['searchTab'],
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1'
}
queryTermArray = network_info['queryTerms'].split(',')
unsorted_items = []
gene_super_list = []
variants_list = get_variants_by_query_list(queryTermArray)
sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
gene_network_data['geneSuperList'] = get_geneSuperList(queryTermArray, sorted_query_list)
network_info['queryTerms'] = network_info['queryTerms'].replace(",", "*")
search_body = get_searchBody(queryTermArray, network_info, disease, sorted_query_list, True)
result = es.search(
index = 'network',
doc_type = 'node',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
#==================================
# PROCESS EACH SEARCH RESULT
#==================================
hitCount = 0
hitMax = 0
hitMin = 0
if(result['hits']['total'] < 1):
print 'no results'
tr = TermAnalyzer()
for hit in result['hits']['hits']:
if(hit["_source"]["node_name"] in queryTermArray):
if(hitCount == 0):
hitMax = hit['_score']
else:
hitMin = hit['_score']
geneNeighborhoodArray = [];
scoreRankCutoff = 0.039
node_list_name_and_weight = []
for genehit in hit["_source"]["node_list"]["node"]:
geneNameDisected = genehit['name'].split(':')
if(len(geneNameDisected) > 1):
geneNeighborhoodArray.append(geneNameDisected[0])
else:
geneNeighborhoodArray.append(genehit['name'])
x = [set(geneNeighborhoodArray), set(queryTermArray)]
y = set.intersection(*x)
emphasizeInfoArrayWithWeights = []
for genehit in y:
try:
match = (item for item in hit["_source"]["node_list"]['node'] if item["name"] == genehit).next()
#match['weight'] = match['weight'] * 70
emphasizeInfoArrayWithWeights.append(match)
except Exception as e:
print e.message
for gene_network_matched in y:
gene_super_list.append(gene_network_matched)
searchResultSummaryString = hit["_source"]["source"] + '-' + str(hit["_source"]["degree"])
wikipedia_cancer_type = tr.get_cancer_description_by_id(hit["_source"]["network_name"]).lower().replace(',','_')
wikipedia_cancer_type = wikipedia_cancer_type[0].upper() + wikipedia_cancer_type[1:]
hit_score = float(hit["_score"])
gene_network_data_items = {
'searchResultTitle': hit["_source"]["node_name"],
'hit_id': hit['_id'],
'diseaseType': tr.get_cancer_description_by_id(hit["_source"]["network_name"]).replace(',',' '),
'WikipediaDiseaseType': wikipedia_cancer_type,
'clusterName': hit["_source"]["node_name"],
'searchResultSummary': searchResultSummaryString,
'searchResultScoreRank': hit["_score"],
'luceneScore': hit["_score"],
'searchResultScoreRankTitle': 'pubmed references ',
'filterValue': '0.0000000029',
'emphasizeInfoArray': set(y),
'emphasizeInfoArrayWithWeights': emphasizeInfoArrayWithWeights,
'top5': hitCount < 5,
'hitOrder': hitCount,
'pubmedCount': 0,
'queryGenesCount': len(emphasizeInfoArrayWithWeights)
}
unsorted_items.append(gene_network_data_items)
hitCount += 1
if(hitCount == 0):
gene_network_data_items = {
'searchResultTitle': 'No Results',
'hit_id': 'N/A',
'diseaseType': "",
'clusterName': 'No Results',
'searchResultSummary': 'No Results',
'searchResultScoreRank': '0',
'luceneScore': '0',
'searchResultScoreRankTitle': '',
'filterValue': '0.0000000029',
'emphasizeInfoArray': [],
'emphasizeInfoArrayWithWeights': [],
'top5': 'true',
'hitOrder': '0',
'pubmedCount': 0
}
gene_network_data['items'].append(gene_network_data_items)
return gene_network_data
print hitCount
foundHit = False
for network_data_item in unsorted_items:
foundHit = False
for sortedID in sorted_query_list['results']:
if sortedID['id'] == network_data_item['clusterName']:
network_data_item['pubmedCount'] = sortedID['count']
network_data_item['searchResultScoreRank'] = sortedID['normalizedValue']
for variant_parent in variants_list:
if(variant_parent['node_name'] == sortedID['id']):
network_data_item['variants'] = variant_parent['variants']
gene_network_data['items'].append(network_data_item)
foundHit = True
if(not foundHit):
network_data_item['pubmedCount'] = 0
network_data_item['searchResultScoreRank'] = 0
gene_network_data['items'].append(network_data_item)
counter_gene_list = Counter(gene_super_list)
for key, value in counter_gene_list.iteritems():
kv_item = {'queryTerm': key,
'boostValue': value}
#gene_network_data['geneSuperList'].append(kv_item)
return gene_network_data
def get_searchBody(queryTermArray, network_info, disease, sorted_query_list, isStarSearch):
should_match = []
must_match = []
returnBody = {}
#sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
for queryTerm in queryTermArray:
boost_value_append = get_boost_value(sorted_query_list['results'], queryTerm)
if(isStarSearch):
should_match.append({"match": {"node_list.name":{"query": queryTerm,"boost": boost_value_append}}})
should_match.append( { 'match': {'node_name': queryTerm} })
#should_match.append( { 'match': {'node_list.node.name': queryTerm} })
else:
should_match.append({"match": {"x_node_list.name":{"query": queryTerm,"boost": boost_value_append}}})
if len(disease) > 0:
diseaseWithSpaces = '';
for addThisDisease in disease:
if len(diseaseWithSpaces) < 1:
diseaseWithSpaces = addThisDisease
else:
diseaseWithSpaces = diseaseWithSpaces + ' ' + addThisDisease
must_match.append({'match': {'network_name': diseaseWithSpaces}})
else:
must_match.append({"match": {"network_name": "LAML ACC BLCA LGG BRCA CESC CHOL COAD ESCA FPPP GBM HNSC KICH KIRC KIRP LIHC LUAD LUSC DLBC MESO OV PAAD PCPG PRAD READ SARC SKCM STAD TGCT THYM THCA UCS UCEC UVM"}})
# REMOVING disease matching until we get that information back in the documents
if(len(disease) > 0): #isStarSearch):
returnBody = {
# 'sort' : [
# '_score'
# ],
'query': {
'bool': {
'must': must_match,
'should': should_match
}
},
'size': 15
}
else:
returnBody = {
# 'sort' : [
# '_score'
# ],
'query': {
'bool': {
#'must': must_match,
'should': should_match
}
},
'size': 15
}
return returnBody
#============================
#============================
# VARIANT LIST
#============================
#============================
def get_variants_by_query_list(queryTerms):
queryTermArray = queryTerms #.split(',')
should_match = []
for queryTerm in queryTermArray:
should_match.append({"match": {"node_name": queryTerm}})
search_body = {
'sort' : [
'_score'
],
'fields': [ 'node_name', 'total_degree', 'node_list.node.name'],
'query': {
'bool': {
'should': should_match
}
},
'size': 35
}
result = es.search(
index = 'network',
doc_type = 'dbsnp_network',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
#==================================
# PROCESS EACH SEARCH RESULT
#==================================
hitCount = 0
if(result['hits']['total'] < 1):
print 'no results'
return_results = []
for hit in result['hits']['hits']:
result_node_name = hit['fields']['node_name'][0]
print result_node_name
variants_array = [];
field_count = 0
for geneNodeHit in hit["fields"]["node_list.node.name"]:
if(field_count < 4):
variants_array.append(geneNodeHit)
else:
break
field_count += 1
a = {
'node_name': result_node_name,
'variants': variants_array
}
return_results.append(a)
hitCount += 1
print hitCount
return return_results
def get_geneSuperList(queryTermArray, sorted_query_list):
returnValue = []
#sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)
for queryTerm in queryTermArray:
#should_match.append( { 'match': {network_info['matchField']: queryTerm} })
boost_value_append = get_boost_value(sorted_query_list['results'], queryTerm)
#should_match.append({"match": {"node_list.node.name":{"query": queryTerm,"boost": boost_value_append}}})
returnValue.append({'queryTerm': queryTerm, 'boostValue': boost_value_append})
return returnValue
def get_boost_value(boostArray, idToCheck):
for boostItem in boostArray:
if(boostItem['id'] == idToCheck):
returnThisValue = boostItem['normalizedValue']
return boostItem['normalizedValue']
return 0
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/SearchGeneTab.py",
"copies": "1",
"size": "12062",
"license": "mit",
"hash": 2978320720312095000,
"line_mean": 33.5616045845,
"line_max": 220,
"alpha_frac": 0.5341568562,
"autogenerated": false,
"ratio": 3.937969311132876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4972126167332876,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
from elasticsearch import Elasticsearch
from app import elastic_search_uri
from app import util
import pymongo
es = Elasticsearch([elastic_search_uri],send_get_body_as='POST',timeout=300) # Prod Clustered Server
def get_people_people_pubmed_search_mapped2(queryTerms, pageNumber=1):
"""Searches the authors index in ElasticSearch.
:param queryTerms: comma delimited string containing query terms
:param pageNumber: integer used for specifying which paged result is being requested
:return: : a list of search results
"""
computed_hash = util.compute_query_list_hash(queryTerms)
#print computed_hash
should_match = []
gene_network_data = {}
if(pageNumber != 99):
from_page = (int(pageNumber) - 1) * 10
if(from_page < 0):
from_page = 0
else:
from_page = 0
queryTermArray = queryTerms.split(',')
client = pymongo.MongoClient()
db = client.cache
cluster_search = db.author_search
cluster_search_found = cluster_search.find_one({'searchId': computed_hash, 'author_type': 'none'})
if(cluster_search_found is not None):
gene_network_data = cluster_search_found['cached_hits']
else:
for queryTerm in queryTermArray:
should_match.append({"match": {"node_list.name": queryTerm}})
search_body = {
'query': {
'filtered': {
'query': {
'bool': {
'must': [
{
'nested': {
'path': 'node_list',
'score_mode': 'sum',
'query': {
'function_score': {
'query': {
'bool': {
'should': should_match
}
},
'field_value_factor': {
'field': 'node_list.scores',
'factor': 1,
'modifier': 'none',
'missing': 1
},
'boost_mode': 'replace'
}
}
}
}
]
}
},
'filter': {
'or': {
'filters': [
{'terms': {
'network_name': [
'authors_pubmed'
]
}}
]
}
}
}
},
"from": 0,
"size": 40,
}
result = es.search(
index = 'authors',
doc_type = 'authors_pubmed',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
gene_network_data = {
'searchGroupTitle': 'People pubmed',
'clusterNodeName': "",
'searchTab': 'PEOPLE_GENE',
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1',
'overlap_counts': []
}
overlap_counts_array = []
overlap_found = False
lucene_score_array = []
for hit in result['hits']['hits']:
lucene_score_array.append(hit['_score'])
lucene_score_max = max(lucene_score_array)
lucene_score_min = min(lucene_score_array)
all_genes = {}
for hit in result['hits']['hits']:
emphasizeInfoArray = []
geneNeighborhoodArray = [];
gene_pub_count = 0
for geneNodeHit in hit["_source"]["node_list"]:
gene_count = len(geneNodeHit['publications'])
geneNeighborhoodArray.append(geneNodeHit['name'])
emphasizeInfoArray.append({'gene': geneNodeHit['name'],
'gene_count': gene_count})
if(geneNodeHit['name'] in queryTermArray):
if(geneNodeHit['name'] not in all_genes):
all_genes[geneNodeHit['name']] = 0
x = [set(geneNeighborhoodArray), set(queryTermArray)]
y = set.intersection(*x)
for gene_network_matched in y:
for match_this_overlap in overlap_counts_array:
if(gene_network_matched == match_this_overlap['gene']):
match_this_overlap['count'] += 1
overlap_found = True
break
if(not overlap_found):
overlap_counts_array.append(
{
'gene': gene_network_matched,
'count': 1
}
)
emphasizeInfoArrayWithPublications = []
genes_overlap = ""
all_pub_counts = 0
for geneNodeHit in hit["_source"]["node_list"]:
for yhit in y:
if(yhit == geneNodeHit['name']):
all_pub_counts += len(geneNodeHit['publications'])
genes_overlap += yhit + ','
emphasizeInfoArrayWithPublications.append({'gene': yhit, 'publication_counts': len(geneNodeHit['publications'])})
break
if(len(genes_overlap) > 0):
genes_overlap = genes_overlap[:-1]
normalized_score = ((hit['_score'] - lucene_score_min)/(lucene_score_max - lucene_score_min)) * 10.0
if(normalized_score < 0.7):
normalized_score = 0.7
gene_network_data_items = {
'searchResultTitle': hit["_source"]["node_name"],
'hit_id': hit['_id'],
'diseaseType': '',
'clusterName': hit['_source']['node_name'],
'searchResultSummary': 'Pubmed', #(' + str(hit['_source']['degree']) + ')',
'searchResultScoreRank': normalized_score, #hit["_score"],
'luceneScore': hit["_score"],
'searchResultScoreRankTitle': 'pubmed references ',
'filterValue': '0.0000000029',
'emphasizeInfoArray': emphasizeInfoArrayWithPublications, #set(y),#emphasizeInfoArray,
'genes_overlap': genes_overlap,
'emphasizeInfoArrayWithWeights': [],
'top5': False,
'hitOrder': 0,
'all_pub_counts': all_pub_counts,
'pubmedCount': hit['_source']['degree']
}
gene_network_data['items'].append(gene_network_data_items)
gene_network_data['overlap_counts'] = overlap_counts_array
for item in gene_network_data['items']:
for gene in item['emphasizeInfoArray']:
all_genes[gene['gene']] += 1
gene_network_data['geneSuperList'] = all_genes
client = pymongo.MongoClient()
db = client.cache
cluster_search = db.author_search
cluster_search.save(
{
'searchId': computed_hash,
'author_type': 'none',
'cached_hits': gene_network_data
}
)
client.close()
if(pageNumber != 99):
gene_network_data['items'] = gene_network_data['items'][from_page:from_page + 10]
return [gene_network_data]
def get_information_page_data_people_centered(elasticId, genes):
hit = get_document_from_elastic_by_id(elasticId, 'pubmed_author')
genes_array = genes.split(',')
gene_pubs = []
for node_list_item in hit['node_list']:
if(node_list_item['name'] in genes_array):
for pub in node_list_item['publications']:
pub['date'] = pub['date'][4:6] + '-' + pub['date'][6:8] + '-' + pub['date'][0:4]
gene_pubs.append(node_list_item)
return_result = {
'publications': gene_pubs
}
return return_result
def get_document_from_elastic_by_id(elasticId, search_type):
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'must': [
{'match': {
'_id': elasticId
}}
]
}
},
'size': 1
}
result = es.search(
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
if(result['hits']['total'] < 1):
print 'no results'
return result['hits']['hits'][0]["_source"]
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/SearchAuthorsTab.py",
"copies": "1",
"size": "9013",
"license": "mit",
"hash": -4442008409606309400,
"line_mean": 32.258302583,
"line_max": 137,
"alpha_frac": 0.4516809054,
"autogenerated": false,
"ratio": 4.464091134224864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5415772039624864,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
from itertools import groupby
from elasticsearch import Elasticsearch
from app import elastic_search_uri
from bson.json_util import dumps
from models.ConditionSearchModel import ConditionSearchResults
import pymongo
from app import util
from operator import itemgetter
es = Elasticsearch([elastic_search_uri],send_get_body_as='POST',timeout=300) # Prod Clustered Server
def get_condition_search(queryTerms, pageNumber=1):
myConditionSearchResults = ConditionSearchResults()
myConditionSearchResults.name = 'my name'
cosmic_grouped_items = get_cosmic_search(queryTerms, pageNumber)
basic_results = get_cosmic_grouped_by_tissues_then_diseases(queryTerms, pageNumber) #get_cosmic_grouped_by_disease_tissue(queryTerms, pageNumber)
myConditionSearchResults.add_simplified_cosmic_item(cosmic_grouped_items)
myConditionSearchResults.add_basic_cosmic_list(basic_results)
result = myConditionSearchResults.to_JSON()
return result
# for c_g_i in cosmic_grouped_items[0]['grouped_items']:
# for c_g_i_p in c_g_i['phenotypes']:
# #myConditionSearchResults.addGroupedCosmicConditions(c_g_i['gene_name']['name'], c_g_i_p)#['phenotype_name'])
# myConditionSearchResults.addGroupedCosmicConditionsGene(c_g_i_p['phenotype_name'], c_g_i['gene_name']['name'], c_g_i_p['group_info'])#, c_g_i_p['variants'])
# clinvar_grouped_items = get_clinvar_search(queryTerms, pageNumber)
# for c_g_i in clinvar_grouped_items[0]['grouped_items']: #phenotype_name': hit["_source"]["node_name"], 'gene_name': genehit, 'resources
#for c_g_i_p in c_g_i['searchResultTitle']:
#myConditionSearchResults.addGroupedClinvarConditions(c_g_i['gene_name']['name'], c_g_i_p)#['phenotype_name'])
# myConditionSearchResults.addGroupedClinvarConditionsGene(c_g_i_p['phenotype_name'], c_g_i['gene_name']['name'], c_g_i_p['resources'])#['phenotype_name'])
# for c_g_i_p in c_g_i['phenotype_name']:
#myConditionSearchResults.addGroupedClinvarConditions(c_g_i['gene_name']['name'], c_g_i_p)#['phenotype_name'])
# myConditionSearchResults.addGroupedClinvarConditionsGene(c_g_i_p, c_g_i['gene_name'], c_g_i['resources'])#['phenotype_name'])
# myConditionSearchResults.group_items_by_conditions()
# myConditionSearchResults.updateCounts()
# result = myConditionSearchResults.to_JSON()
# return result
def get_cosmic_grouped_items(queryTerms, phenotypes=None):
hitCount = 0
phenotype_network_data = {
'searchGroupTitle': 'Phenotypes',
'clusterNodeName': "",
'searchTab': 'PHENOTYPES',
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1'
}
should_match = []
must_match = []
queryTermArray = queryTerms.split(',')
for queryTerm in queryTermArray:
should_match.append({"match": {"node_list.node.name": queryTerm}})
if(phenotypes is not None):
phenotypeTermArray = phenotypes.split('~')
for phenotypeTerm in phenotypeTermArray:
must_match.append({"match": {"node_name": phenotypeTerm}})
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'must': must_match,
'should': should_match
}
},
'size': 15
}
else:
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'should': should_match
}
},
'size': 15
}
result = es.search(
index = 'conditions',
doc_type = 'conditions_clinvar',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
if(result['hits']['total'] < 1):
print 'no results'
gene_network_data_items = {
'searchResultTitle': 'No Results',
'hit_id': 'N/A',
'diseaseType': "",
'clusterName': 'No Results',
'searchResultSummary': 'No Results',
'searchResultScoreRank': '0',
'luceneScore': '0',
'searchResultScoreRankTitle': '',
'filterValue': '0.0000000029',
'emphasizeInfoArray': [],
'emphasizeInfoArrayWithWeights': [],
'top5': 'true',
'hitOrder': '0',
'pubmedCount': 0
}
phenotype_network_data['items'].append(gene_network_data_items)
return [phenotype_network_data]
else:
for hit in result['hits']['hits']:
hitCount += 1
type_counts = {}#{'genes': len(hit["_source"]["node_list"]['node'])}#'indel': 0, 'insertion': 0, 'deletion': 0, 'duplication': 0, 'single nucleotide variant': 0}
emphasizeInfoArrayWithWeights = []
searchResultSummaryString = hit["_source"]["source"] + '-' + str(hit["_source"]["degree"])
for genehit in queryTermArray:
for item in hit["_source"]["node_list"]['node']:
if(item["name"] == genehit):
emphasizeInfoArrayWithWeights.append(item)
break
for variant_hit in hit['_source']['variant_list']['node']:
# indel, insertion, deletion, duplication, single nucleotide variant
if(upcase_first_letter(variant_hit['variant_type']) in type_counts):
type_counts[upcase_first_letter(variant_hit['variant_type'])] += 1
else:
type_counts[upcase_first_letter(variant_hit['variant_type'])] = 1
phenotype_ids = []
for phenotype_id in hit['_source']['phenotype_id_list']['node']:
ids_split = phenotype_id['name'].split(':')
if(len(ids_split) > 1):
phenotype_ids.append({ids_split[0]:ids_split[1]})
gene_network_data_items = {
'searchResultTitle': hit["_source"]["node_name"],
'hit_id': hit['_id'],
'diseaseType': '', #"[Phenotype = " + hit["_source"]["node_name"] + "]",
'clusterName': hit["_source"]["node_name"],
'searchResultSummary': searchResultSummaryString,
'searchResultScoreRank': hit["_score"],
'luceneScore': hit["_score"],
'searchResultScoreRankTitle': 'pubmed references ',
'filterValue': '0.0000000029',
'emphasizeInfoArray': emphasizeInfoArrayWithWeights,
'emphasizeInfoArrayWithWeights': emphasizeInfoArrayWithWeights,
'phenotype_ids': phenotype_ids,
'node_type_counts': type_counts,
'top5': hitCount < 5,
'hitOrder': hitCount,
'pubmedCount': 0,
'queryGenesCount': len(emphasizeInfoArrayWithWeights)
}
phenotype_network_data['items'].append(gene_network_data_items)
#==================================
# GROUP PHENOTYPE BY TARGETED GENE
#==================================
phenotype_gene_grouping = []
for phenotype_hit in phenotype_network_data['items']:
match_found = False
# After first item is already added (need to append to existing array)
for gene_loop_item in phenotype_gene_grouping:
if(len(phenotype_hit['emphasizeInfoArray']) > 0):
if(gene_loop_item['gene_name'] == phenotype_hit['emphasizeInfoArray'][0]):
gene_loop_item['searchResultTitle'].append({'phenotype_name': phenotype_hit['searchResultTitle'],
'hit_id': phenotype_hit['hit_id']})
match_found = True
# First item added
if(not match_found):
if(len(phenotype_hit['emphasizeInfoArray']) > 0):
phenotype_gene_grouping.append(
{
'gene_name': phenotype_hit['emphasizeInfoArray'][0],
'searchResultTitle': [{'phenotype_name': phenotype_hit['searchResultTitle'],
'hit_id': phenotype_hit['hit_id']}]
}
)
else:
phenotype_gene_grouping.append(
{
'gene_name': 'unknown',
'searchResultTitle': [{'phenotype_name': phenotype_hit['searchResultTitle'],
'hit_id': phenotype_hit['hit_id']}]
}
)
for phenotype_gene_no_count_item in phenotype_gene_grouping:
phenotype_gene_no_count_item['gene_count'] = len(phenotype_gene_no_count_item['searchResultTitle'])
#drug_gene_dumped = dumps(drug_gene_grouping)
phenotype_network_data['grouped_items'] = phenotype_gene_grouping
return [phenotype_network_data]
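# --- Illustrative sketch (not part of the original module) ---
# The insert-or-append loop above groups phenotype hits under the first query gene they
# matched. A minimal equivalent using a plain dict, assuming each item carries the
# 'emphasizeInfoArray', 'searchResultTitle' and 'hit_id' keys built in
# get_cosmic_grouped_items() and that each matched gene dict has a 'name' entry:
def _group_phenotypes_by_gene_sketch(items):
    grouped = {}
    for item in items:
        gene = item['emphasizeInfoArray'][0]['name'] if item['emphasizeInfoArray'] else 'unknown'
        grouped.setdefault(gene, []).append({'phenotype_name': item['searchResultTitle'],
                                             'hit_id': item['hit_id']})
    return [{'gene_name': gene, 'searchResultTitle': hits, 'gene_count': len(hits)}
            for gene, hits in grouped.items()]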
def get_cosmic_searchx(queryTerms, pageNumber):
hitCount = 0
from_page = (pageNumber - 1) * 50
if(from_page < 0):
from_page = 0
phenotype_network_data = {
'searchGroupTitle': 'Phenotypes',
'clusterNodeName': "",
'searchTab': 'PHENOTYPES',
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1'
}
should_match = []
must_match = []
queryTermArray = queryTerms.split(',')
for queryTerm in queryTermArray:
should_match.append({"match": {"node_list.node.name": queryTerm}})
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'should': should_match
}
},
'from': from_page,
'size': 50
}
result = es.search(
index = 'conditions',
doc_type = 'conditions_clinvar',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
if(result['hits']['total'] < 1):
print 'no results'
gene_network_data_items = {
'searchResultTitle': 'No Results',
'hit_id': 'N/A',
'diseaseType': "",
'clusterName': 'No Results',
'searchResultSummary': 'No Results',
'searchResultScoreRank': '0',
'luceneScore': '0',
'searchResultScoreRankTitle': '',
'filterValue': '0.0000000029',
'emphasizeInfoArray': [],
'emphasizeInfoArrayWithWeights': [],
'top5': 'true',
'hitOrder': '0',
'pubmedCount': 0
}
phenotype_network_data['items'].append(gene_network_data_items)
return [phenotype_network_data]
else:
for hit in result['hits']['hits']:
hitCount += 1
type_counts = {}#{'genes': len(hit["_source"]["node_list"]['node'])}#'indel': 0, 'insertion': 0, 'deletion': 0, 'duplication': 0, 'single nucleotide variant': 0}
emphasizeInfoArrayWithWeights = []
searchResultSummaryString = hit["_source"]["source"] + '-' + str(hit["_source"]["degree"])
for genehit in queryTermArray:
for item in hit["_source"]["node_list"]['node']:
if(item["name"] == genehit):
emphasizeInfoArrayWithWeights.append(item)
break
for variant_hit in hit['_source']['variant_list']['node']:
# indel, insertion, deletion, duplication, single nucleotide variant
if(upcase_first_letter(variant_hit['variant_type']) in type_counts):
type_counts[upcase_first_letter(variant_hit['variant_type'])] += 1
else:
type_counts[upcase_first_letter(variant_hit['variant_type'])] = 1
phenotype_ids = []
for phenotype_id in hit['_source']['phenotype_id_list']['node']:
ids_split = phenotype_id['name'].split(':')
if(len(ids_split) > 1):
phenotype_ids.append({ids_split[0]:ids_split[1]})
gene_network_data_items = {
'searchResultTitle': hit["_source"]["node_name"],
'hit_id': hit['_id'],
'diseaseType': '', #"[Phenotype = " + hit["_source"]["node_name"] + "]",
'clusterName': hit["_source"]["node_name"],
'searchResultSummary': searchResultSummaryString,
'searchResultScoreRank': hit["_score"],
'luceneScore': hit["_score"],
'searchResultScoreRankTitle': 'pubmed references ',
'filterValue': '0.0000000029',
'emphasizeInfoArray': emphasizeInfoArrayWithWeights,
'emphasizeInfoArrayWithWeights': emphasizeInfoArrayWithWeights,
'phenotype_ids': phenotype_ids,
'node_type_counts': type_counts,
'top5': hitCount < 5,
'hitOrder': hitCount,
'pubmedCount': 0,
'queryGenesCount': len(emphasizeInfoArrayWithWeights)
}
phenotype_network_data['items'].append(gene_network_data_items)
#==================================
# GROUP PHENOTYPE BY TARGETED GENE
#==================================
phenotype_gene_grouping = []
for phenotype_hit in phenotype_network_data['items']:
match_found = False
# After first item is already added (need to append to existing array)
for gene_loop_item in phenotype_gene_grouping:
if(len(phenotype_hit['emphasizeInfoArray']) > 0):
if(gene_loop_item['gene_name'] == phenotype_hit['emphasizeInfoArray'][0]):
gene_loop_item['searchResultTitle'].append({'phenotype_name': phenotype_hit['searchResultTitle'],
'hit_id': phenotype_hit['hit_id']})
match_found = True
# First item added
if(not match_found):
if(len(phenotype_hit['emphasizeInfoArray']) > 0):
phenotype_gene_grouping.append(
{
'gene_name': phenotype_hit['emphasizeInfoArray'][0],
'searchResultTitle': [{'phenotype_name': phenotype_hit['searchResultTitle'],
'hit_id': phenotype_hit['hit_id']}]
}
)
else:
phenotype_gene_grouping.append(
{
'gene_name': 'unknown',
'searchResultTitle': [{'phenotype_name': phenotype_hit['searchResultTitle'],
'hit_id': phenotype_hit['hit_id']}]
}
)
for phenotype_gene_no_count_item in phenotype_gene_grouping:
phenotype_gene_no_count_item['gene_count'] = len(phenotype_gene_no_count_item['searchResultTitle'])
#drug_gene_dumped = dumps(drug_gene_grouping)
phenotype_network_data['grouped_items'] = phenotype_gene_grouping
return [phenotype_network_data]
def get_clinvar_search(queryTerms, pageNumber):
hitCount = 0
from_page = (pageNumber - 1) * 200
if(from_page < 0):
from_page = 0
phenotype_network_data = {
'searchGroupTitle': 'Phenotypes',
'clusterNodeName': "",
'searchTab': 'PHENOTYPES',
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1'
}
hits_by_condition = []
should_match = []
must_match = []
queryTermArray = queryTerms.split(',')
for queryTerm in queryTermArray:
should_match.append({"match": {"node_list.name": queryTerm}})
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'should': should_match
}
},
'from': from_page,
'size': 100
}
result = es.search(
index = 'conditions',
doc_type = 'conditions_clinvar',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
if(result['hits']['total'] < 1):
print 'no results'
gene_network_data_items = {
'searchResultTitle': 'No Results',
'hit_id': 'N/A',
'diseaseType': "",
'clusterName': 'No Results',
'searchResultSummary': 'No Results',
'searchResultScoreRank': '0',
'luceneScore': '0',
'searchResultScoreRankTitle': '',
'filterValue': '0.0000000029',
'emphasizeInfoArray': [],
'emphasizeInfoArrayWithWeights': [],
'top5': 'true',
'hitOrder': '0',
'pubmedCount': 0
}
phenotype_network_data['items'].append(gene_network_data_items)
return [phenotype_network_data]
else:
for hit in result['hits']['hits']:
hitCount += 1
for genehit in queryTermArray:
for item in hit["_source"]["node_list"]:
if(item["name"] == genehit):
hit_resources = []
for phenotype_id in hit['_source']['phenotype_id_list']:
ids_split = phenotype_id['name'].split(':')
if(len(ids_split) > 1):
hit_resources.append({ids_split[0]:ids_split[1]})
hits_by_condition.append({'phenotype_name': [hit["_source"]["node_name"]], 'gene_name': genehit, 'resources': hit_resources})
phenotype_network_data['grouped_items'] = hits_by_condition; #phenotype_gene_grouping
return [phenotype_network_data]
def get_cosmic_search(queryTerms, pageNumber):
computed_hash = util.compute_query_list_hash(queryTerms)
#print computed_hash
search_size = 15
from_page = 0
hitCount = 0
if(pageNumber == 99):
search_size = 60
from_page = 0
else:
search_size = 15
from_page = (pageNumber - 1) * 15
if(from_page < 0):
from_page = 0
phenotype_network_data = {
'searchGroupTitle': 'Cosmic Phenotypes',
'clusterNodeName': "Cosmic",
'searchTab': 'PHENOTYPES',
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1'
}
gene_condition_array = []
should_match = []
must_match = []
queryTermArray = queryTerms.split(',')
for queryTerm in queryTermArray:
should_match.append({"match": {"node_list.name": queryTerm}})
gene_condition_array.append({queryTerm: []})
search_body = {
'sort' : [
'_score'
],
'fields': ['node_list.description','node_list.tissue','node_list.name', 'node_list.cosmic_id', 'node_list.pubmed'], #, 'node_name'],
'query': {
'bool': {
'should': should_match,
'must_not': [{'match': {'node_list.description': 'NS'}}]
}
},
#'from': 0,
#'size': 60
'from': from_page,
'size': search_size
}
result = es.search(
index = 'conditions',
doc_type = 'conditions_cosmic_mutant',
body = search_body
)
#client = pymongo.MongoClient()
#db = client.cache
#cluster_search = db.condition_search
#cluster_search.save(
# {
# 'searchId': computed_hash,
# 'condition_type': 'none',
# 'cached_hits': cached_hits
# }
#)
#client.close()
print("Got %d Hits:" % result['hits']['total'])
if(result['hits']['total'] < 1):
print 'no results'
gene_network_data_items = {
'searchResultTitle': 'No Results',
'hit_id': 'N/A',
'diseaseType': "",
'clusterName': 'No Results',
'searchResultSummary': 'No Results',
'searchResultScoreRank': '0',
'luceneScore': '0',
'searchResultScoreRankTitle': '',
'filterValue': '0.0000000029',
'emphasizeInfoArray': [],
'emphasizeInfoArrayWithWeights': [],
'top5': 'true',
'hitOrder': '0',
'pubmedCount': 0
}
phenotype_network_data['items'].append(gene_network_data_items)
return [phenotype_network_data]
else:
gene_network_data_items = {
'hit_id': 'unknown',
'diseaseType': '', #"[Phenotype = " + hit["_source"]["node_name"] + "]",
}
gene_condition_unique = []
sorted_list = sorted(result['hits']['hits'], key=lambda k: k['fields']['node_list.name']) # sort by gene symbol
all_doc_json = []
for key, group in groupby(sorted_list, lambda item: item['fields']['node_list.name']):
groups = []
groups_for_set = []
groups_for_set_dict = []
groups.append(list(group))
#print key[0]
sorted_list = sorted(result['hits']['hits'], key=lambda k: k['fields']['node_list.name'])
for group_item in groups[0]:
groups_for_set.append(group_item['fields']['node_list.tissue'][0] + '|' + group_item['fields']['node_list.description'][0]) # + '~' + group_item['fields']['node_list.cosmic_id'][0])
groups_for_set_dict.append(
{
'title': group_item['fields']['node_list.tissue'][0] + '|' + group_item['fields']['node_list.description'][0],
'cosmic_id': group_item['fields']['node_list.cosmic_id'][0],
'pubmed_id': group_item['fields']['node_list.pubmed'][0],
'gene': group_item['fields']['node_list.name'][0],
'tissue': group_item['fields']['node_list.tissue'][0],
'disease': group_item['fields']['node_list.description'][0],
'esId': group_item['_id'],
'score': group_item['_score']
#'variant_gene': {
# 'variant': group_item['fields']['node_name'][0],
# 'gene': group_item['fields']['node_list.name'][0]
#}
}
)
all_doc_json.append(
{
'title': group_item['fields']['node_list.tissue'][0] + '|' + group_item['fields']['node_list.description'][0],
'cosmic_id': group_item['fields']['node_list.cosmic_id'][0],
'pubmed_id': group_item['fields']['node_list.pubmed'][0],
'gene': group_item['fields']['node_list.name'][0],
'tissue': group_item['fields']['node_list.tissue'][0],
'disease': group_item['fields']['node_list.description'][0],
'esId': group_item['_id'],
'score': group_item['_score']
#'variant_gene': {
# 'variant': group_item['fields']['node_name'][0],
# 'gene': group_item['fields']['node_list.name'][0]
#}
}
)
#print key, groups
#for groups_for_set_item in set(groups_for_set):
# print groups_for_set_item
sorted_expanded_phenotypes = []
sorted_phenotype_titles = sorted(set(groups_for_set))
sorted_phenotype_titles_dict = sorted(groups_for_set_dict, key=lambda k: k['title'])
for sorted_title in sorted_phenotype_titles:
sorted_title_parts = sorted_title.split('~')
cosmic_ids = []
variant_ids = []
pubmed_ids = []
for sorted_phenotype_cosmic_id in sorted_phenotype_titles_dict:
if(sorted_phenotype_cosmic_id['title'] == sorted_title):
if(sorted_phenotype_cosmic_id['cosmic_id'] not in cosmic_ids):
cosmic_ids.append(sorted_phenotype_cosmic_id['cosmic_id'])
#variant_ids.append(sorted_phenotype_cosmic_id['variant_gene'])
if((sorted_phenotype_cosmic_id['pubmed_id'] not in pubmed_ids) and len(sorted_phenotype_cosmic_id['pubmed_id']) > 0):
pubmed_ids.append(sorted_phenotype_cosmic_id['pubmed_id'])
sorted_expanded_phenotypes.append(
{
'phenotype_name': sorted_title, #sorted_title_parts[0],
'group_info': {
'cosmic_ids': cosmic_ids,
'pubmed_ids': pubmed_ids
},
#'variants': variant_ids,
#'cosmic_id': sorted_title_parts[1],
'hit_id': 'unknown'
}
)
gene_condition_unique.append(
{
'gene_name': {'name': key[0]},
'phenotypes': sorted_expanded_phenotypes,
'phenotypecount': len(sorted_expanded_phenotypes)
}
)
sorted_list2 = sorted(all_doc_json, key=lambda k: k['disease'])
disease_group_by = []
for disease_key, tissue_group in groupby(sorted_list2, lambda item: item['disease']):
tissues = list(tissue_group)
sorted_list3 = sorted(tissues, key=lambda k: k['tissue'])
tissue_group_by = []
genes_in_group = []
for tissue_key, item_group in groupby(sorted_list3, lambda item2: item2['tissue']):
gene_list = []
pubmed_list = []
cosmic_list = []
item_group_list = list(item_group)
for gene_check in item_group_list:
if(gene_check['gene'] not in gene_list):
gene_list.append(gene_check['gene'])
for gene_check in item_group_list:
if(gene_check['pubmed_id'] not in pubmed_list):
if(len(gene_check['pubmed_id']) > 0):
pubmed_list.append(gene_check['pubmed_id'])
for gene_check in item_group_list:
if(gene_check['cosmic_id'] not in cosmic_list):
if(len(gene_check['cosmic_id']) > 0):
cosmic_list.append(gene_check['cosmic_id'])
tissue_group_by.append(
{
'tissue': tissue_key,
'genes': gene_list,
'pubmed_ids': pubmed_list,
'cosmic_ids': cosmic_list,
'items': item_group_list,
}
)
disease_group_by.append(
{
'disease': disease_key,
'data_source': 'COSMIC',
'tissues': tissue_group_by,
'grouped_by_conditions_count': len(tissue_group_by)
}
)
#sorted_list3 = sorted(disease_group_by, key=lambda k: k['title'])
#disease_group_by = []
#for disease_key, disease_group in itertools.groupby(sorted_list2, lambda item: item['disease']):
# disease_group_by.append(
# {
# 'disease': disease_key,
# 'tissues': list(disease_group)
# }
# )
phenotype_network_data['grouped_items'] = gene_condition_unique
#print dumps([phenotype_network_data])
return disease_group_by
def get_cosmic_grouped_by_disease_tissue(queryTerms, pageNumber):
# the loop below iterates grouped_diseases, so this call must stay active
grouped_diseases = get_cosmic_grouped_by_disease(queryTerms, pageNumber)
#grouped_tissues = get_cosmic_grouped_by_tissues(queryTerms, pageNumber)  # no helper by this name is defined in this module
disease_groups = []
for disease in grouped_diseases:
sorted_list = sorted(disease['tissues'], key=lambda k: k['fields']['node_list.tissue']) # sort by tissues
#===============================
# GROUP BY TISSUE
#===============================
tissue_groups = []
for key, group in groupby(sorted_list, lambda item: item['fields']['node_list.tissue']):
gene_list = []
info_page_list = ''
dedup_reference = []
gene_string_list = []
for tissue_gene in list(group):
if(tissue_gene['fields']['node_list.name'][0] not in gene_string_list):
gene_string_list.append(tissue_gene['fields']['node_list.name'][0])
dedup_signature = tissue_gene['fields']['node_list.name'][0] + tissue_gene['fields']['node_list.cosmic_id'][0]
if(dedup_signature not in dedup_reference):
dedup_reference.append(dedup_signature)
gene_item = {
'gene': tissue_gene['fields']['node_list.name'][0],
'cosmic_id': tissue_gene['fields']['node_list.cosmic_id'][0],
'pubmed_id': tissue_gene['fields']['node_list.pubmed'][0],
'es_id': tissue_gene['_id'],
'disease': tissue_gene['fields']['node_list.description'][0],
'tissue': tissue_gene['fields']['node_list.tissue'][0],
'_score': tissue_gene['_score'],
'info_page': tissue_gene['fields']['node_list.cosmic_id'][0] + '~' + tissue_gene['fields']['node_list.name'][0]
}
info_page_list += tissue_gene['fields']['node_list.cosmic_id'][0] + '~' + tissue_gene['fields']['node_list.name'][0] + ','
gene_list.append(gene_item)
if(len(info_page_list) > 0):
info_page_list = info_page_list[:-1]
tissue_group_item = {
'tissue': key[0],
'genes': gene_list,
'info_page_list': info_page_list,
'gene_list': gene_string_list
}
tissue_groups.append(tissue_group_item)
disease_group_item = {
'disease': disease['disease'][0],
'tissues': tissue_groups,
'grouped_by_conditions_count': len(tissue_groups)
}
disease_groups.append(disease_group_item)
return disease_groups
def get_cosmic_grouped_by_tissues_then_diseases(queryTerms, pageNumber):
computed_hash = util.compute_query_list_hash(queryTerms)
#print computed_hash
hitCount = 0
if(pageNumber != 99):
from_page = (int(pageNumber) - 1) * 5
if(from_page < 0):
from_page = 0
else:
from_page = 0
phenotype_network_data = {
'searchGroupTitle': 'Cosmic Phenotypes',
'clusterNodeName': "Cosmic",
'searchTab': 'PHENOTYPES',
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1'
}
gene_condition_array = []
search_these_cached_hits = []
should_match = []
must_match = []
queryTermArray = queryTerms.split(',')
cached_hits = []
conditions_dict = {}
conditions_array = []
tissue_filter_array = []
disease_filter_array = []
client = pymongo.MongoClient()
db = client.cache
conditions_search = db.conditions_search
conditions_search_found = conditions_search.find_one({'searchId': computed_hash})
if(conditions_search_found is not None):
cached_hits = conditions_search_found['cached_hits']
else:
for queryTerm in queryTermArray:
should_match.append({"match": {"node_list.name": queryTerm}})
gene_condition_array.append({queryTerm: []})
search_body = {
'sort' : [
'_score'
],
#'fields': ['node_list.description','node_list.tissue','node_list.name', 'node_list.cosmic_id', 'node_list.pubmed'], #, 'node_name'],
'query': {
'bool': {
'should': should_match,
'must_not': [{'match': {'node_list.description': 'NS'}}]
}
},
'from': 0,
'size': 120
#'from': from_page,
#'size': 15
}
result = es.search(
index = 'conditions',
doc_type = 'conditions_cosmic_mutant',
body = search_body
)
if(result['hits']['total'] < 1):
print 'no results'
return []
else:
hit_list = []
for hit in result['hits']['hits']:
for node_list_item in hit['_source']['node_list']:
node_list_item['_score'] = hit['_score']
hit_list.append(node_list_item)
sorted_hits = sorted(hit_list, key=lambda k: k['_score'], reverse=True)
tissue_grouper = itemgetter("tissue")
disease_grouper = itemgetter("description")
for tissue, group in groupby(sorted(sorted_hits, key = tissue_grouper), tissue_grouper):
if(tissue not in tissue_filter_array):
tissue_filter_array.append(tissue)
conditions_dict[tissue] = {'diseases': []}
inner_count = 0
for disease, inner_group in groupby(sorted(group, key = disease_grouper), disease_grouper):
if(disease not in disease_filter_array):
disease_filter_array.append(disease)
inner_count += 1
inner_group_list = list(inner_group)
gene_list = []
for gene_item in inner_group_list:
if(gene_item['name'] not in gene_list):
gene_list.append(gene_item['name'])
conditions_dict[tissue]['diseases'].append({'disease_desc': disease, 'disease_content': {'condition_item': inner_group_list, 'genes': gene_list}})
conditions_dict[tissue]['disease_count'] = inner_count
#print dumps(conditions_dict)
for k, v in conditions_dict.iteritems():
top_gene_overlap = 0
for disease in v['diseases']:
if(len(disease['disease_content']['genes']) > top_gene_overlap):
top_gene_overlap = len(disease['disease_content']['genes'])
conditions_array.append({'tissue': k, 'top_gene_overlap': top_gene_overlap, 'disease_count': v['disease_count'], 'disease_group': v})
sorted_cached_hits = sorted(conditions_array, key=lambda k: k['disease_count'], reverse=True)
cached_hits = {'hits': sorted_cached_hits, 'tissue_filters': tissue_filter_array, 'disease_filters': disease_filter_array}
conditions_search.save(
{
'searchId': computed_hash,
'condition': 'none specified',
'cached_hits': cached_hits
}
)
client.close()
if(pageNumber != 99):
temp_search_these_cached_hits = cached_hits['hits'][from_page:from_page + 5]
cached_hits['hits'] = temp_search_these_cached_hits
search_these_cached_hits = cached_hits
else:
search_these_cached_hits = cached_hits
return search_these_cached_hits
def get_cosmic_grouped_by_disease(queryTerms, pageNumber):
computed_hash = util.compute_query_list_hash(queryTerms)
#print computed_hash
hitCount = 0
from_page = (pageNumber - 1) * 15
if(from_page < 0):
from_page = 0
phenotype_network_data = {
'searchGroupTitle': 'Cosmic Phenotypes',
'clusterNodeName': "Cosmic",
'searchTab': 'PHENOTYPES',
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1'
}
gene_condition_array = []
should_match = []
must_match = []
queryTermArray = queryTerms.split(',')
for queryTerm in queryTermArray:
should_match.append({"match": {"node_list.name": queryTerm}})
gene_condition_array.append({queryTerm: []})
search_body = {
'sort' : [
'_score'
],
'fields': ['node_list.description','node_list.tissue','node_list.name', 'node_list.cosmic_id', 'node_list.pubmed'], #, 'node_name'],
'query': {
'bool': {
'should': should_match,
'must_not': [{'match': {'node_list.description': 'NS'}}]
}
},
'from': 0,
'size': 60
#'from': from_page,
#'size': 15
}
result = es.search(
index = 'conditions',
doc_type = 'conditions_cosmic_mutant',
body = search_body
)
if(result['hits']['total'] < 1):
print 'no results'
gene_network_data_items = {
'searchResultTitle': 'No Results',
'hit_id': 'N/A',
'diseaseType': "",
'clusterName': 'No Results',
'searchResultSummary': 'No Results',
'searchResultScoreRank': '0',
'luceneScore': '0',
'searchResultScoreRankTitle': '',
'filterValue': '0.0000000029',
'emphasizeInfoArray': [],
'emphasizeInfoArrayWithWeights': [],
'top5': 'true',
'hitOrder': '0',
'pubmedCount': 0
}
phenotype_network_data['items'].append(gene_network_data_items)
return [phenotype_network_data]
else:
sorted_list = sorted(result['hits']['hits'], key=lambda k: k['fields']['node_list.description']) # sort by disease
#===============================
# GROUP BY DISEASE
#===============================
disease_groups = []
for key, group in groupby(sorted_list, lambda item: item['fields']['node_list.description']):
disease_group_item = {
'disease': key,
'tissues': list(group)
}
disease_groups.append(disease_group_item)
return disease_groups
def upcase_first_letter(s):
return s[0].upper() + s[1:]
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/SearchConditionsTab.py",
"copies": "1",
"size": "39357",
"license": "mit",
"hash": -4482864544533970000,
"line_mean": 36.6622009569,
"line_max": 197,
"alpha_frac": 0.5066188988,
"autogenerated": false,
"ratio": 3.8878790872271067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48944979860271065,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
from xml.dom import minidom
from bson.json_util import dumps
import xml.etree.ElementTree as ET
import requests
import pymongo
def get_drugbank_synonym(drugbank_id):
client = pymongo.MongoClient()
db = client.identifiers
drugbank_lookup = db.drugbank
result = drugbank_lookup.find_one({'drugbank_id': drugbank_id})
if(result is not None):
return result['drug_name']
else:
pubchemUrl = 'https://pubchem.ncbi.nlm.nih.gov/compound/'
r = requests.get(pubchemUrl + drugbank_id)
lines = list(r.iter_lines())
count=0
for idx, line in enumerate(lines):
if('data-pubchem-title=' in line):
location_start = line.find('data-pubchem-title=')
print location_start + 20
line_parsed1 = line[(location_start + 20):]
target_name = line_parsed1[:line_parsed1.find('"')]
a = {
'drugbank_id': drugbank_id,
'drug_name': target_name
}
print dumps(a)
drugbank_lookup.save(a)
return a['drug_name']
def extract_ids_from_xml_file():
file_names = ['drugbank1.xml','drugbank1b.xml','drugbank2.xml','drugbank3.xml','drugbank4.xml','drugbank5.xml','drugbank6.xml']
for file_name in file_names:
xmlfile = minidom.parse(file_name)
drug_list = xmlfile.getElementsByTagName('drug')
for drug in drug_list:
drugbank_id = ''
drug_product_name = ''
if drug.parentNode.tagName == 'drugbank':
drug_id_list = drug.getElementsByTagName('drugbank-id')
for drug_id in drug_id_list:
if(drug_id.getAttribute('primary') == 'true'):
drugbank_id = drug_id.firstChild.nodeValue
drug_name_list = drug.getElementsByTagName('name')  # look up the name inside the current drug node rather than the whole document
if(drug_name_list.length > 0):
drug_product_name = drug_name_list[0].firstChild.nodeValue
print drugbank_id + '\t' + drug_product_name
def extract_ids_from_xml_file_tree():
tree = ET.parse('drugbank1.xml')
root = tree.getroot()
for drug_node in root.findall('drug'):
rank = drug_node.find('drugbank-id').text
print rank
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/DrugBank.py",
"copies": "1",
"size": "2387",
"license": "mit",
"hash": 8207706602907682000,
"line_mean": 33.1,
"line_max": 131,
"alpha_frac": 0.5651445329,
"autogenerated": false,
"ratio": 3.557377049180328,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4622521582080328,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
import json
class DrugSearchResult():
def __init__(self):
self.inferred_drugs = []
def add_drug_item(self, drug_item):
# Search the dictionary for the grouping term
# if we find it we append the grouped condition
# if we don't find it we insert the grouping term and add the grouped condition
        # (see the illustrative usage notes after this method)
        foundGroupedItem = False
        if(len(drug_item['drugs']) > 0):
            gene_id = drug_item['index']
            for drug_id in drug_item['drugs']:
                for grouped_item in self.inferred_drugs:
                    # NOTE: the original comparison was truncated ("drug_item."); matching on the
                    # drug id is an assumption about the intended grouping key
                    if(grouped_item.key == drug_id):
                        foundGroupedItem = True
                        grouped_item.groupedItemsCosmic.append(groupedCondition)
                if(not foundGroupedItem):
                    # NOTE: GroupedItem, groupedCondition, groupItemTermName and self.grouped_items
                    # are not defined in this module (they appear to belong to the companion
                    # condition-search model), so this branch is kept as found
                    myGroupedItem = GroupedItem()
                    myGroupedItem.groupedItemTerm = groupItemTermName
                    myGroupedItem.groupedItemsCosmic.append(groupedCondition)
                    self.grouped_items.append(myGroupedItem)
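    # Illustrative usage (not part of the original model): the method reads an 'index'
    # entry (treated as the gene id) and a 'drugs' list from its argument; the identifiers
    # below are placeholders for the example.
    #
    #   result = DrugSearchResult()
    #   result.add_drug_item({'index': 'EGFR', 'drugs': ['DB00530', 'DB00317']})
    #   print(result.to_JSON())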
def to_JSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4) | {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/models/DrugResultModel.py",
"copies": "1",
"size": "1193",
"license": "mit",
"hash": -2221320215847401700,
"line_mean": 35.1818181818,
"line_max": 87,
"alpha_frac": 0.5825649623,
"autogenerated": false,
"ratio": 4.003355704697986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5085920666997986,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aarongary'
import requests
import tarfile,sys
import urllib2
import json
from itertools import islice
from bson.json_util import dumps
from collections import Counter
def getEntrezGeneInfoByID(gene_id):
if(len(gene_id) > 0):
try:
entrez_url = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=gene&id=" + str(gene_id) + "&retmode=json"
entrez_content = "";
entrez_data = {
'hits': []
}
for line in urllib2.urlopen(entrez_url):
entrez_content += line.rstrip() + " "
hit = {
'name': entrez_content,
'_score': 0,
'symbol': gene_id,
'source': 'Entrez'
}
entrez_data['hits'].append(hit)
except Exception as e:
print e.message
return {'hits': [{'symbol': gene_id, 'name': 'Entrez results: 0'}]}
return entrez_data
else :
return {'hits': [{'symbol': gene_id, 'name': 'not available'}]}
def getESPubMedByID(gene_id):
if(len(gene_id) > 0):
try:
entrez_url = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=gene&id=" + str(gene_id) + "&retmode=json"
entrez_content = "";
entrez_data = {
'hits': []
}
#http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubchem&term=DB03585
for line in urllib2.urlopen(entrez_url):
entrez_content += line.rstrip() + " "
hit = {
'name': entrez_content,
'_score': 0,
'symbol': gene_id
}
entrez_data['hits'].append(hit)
except Exception as e:
print e.message
return {'hits': [{'symbol': gene_id, 'name': 'unable to return data'}]}
return entrez_data
else :
return {'hits': [{'symbol': gene_id, 'name': 'not available'}]}
def getTribeTermResolution(terms):
import requests
TRIBE_URL = "http://tribe.greenelab.com"
query_terms = terms.split(',')
gene_list = []
for term in query_terms:
gene_list.append(term)
payload = {'from_id': 'Entrez', 'to_id': 'Symbol', 'gene_list': gene_list, 'organism': 'Homo sapiens'}
r = requests.post(TRIBE_URL + '/api/v1/gene/xrid_translate', data=payload)
result_dictionary = r.json()
for gene_query, search_result in result_dictionary.iteritems():
print(gene_query + ": " + str(search_result))
return result_dictionary
def start_thumbnail_generator(es_id_list):
import requests
r = requests.get('http://localhost:3000/setThumbnailList/' + es_id_list)
return {'message': 'success'}
| {
"repo_name": "ucsd-ccbb/Oncolist",
"path": "src/restLayer/app/RestBroker.py",
"copies": "1",
"size": "2808",
"license": "mit",
"hash": 3738412820966717400,
"line_mean": 26.2621359223,
"line_max": 128,
"alpha_frac": 0.5413105413,
"autogenerated": false,
"ratio": 3.4752475247524752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9440868142711696,
"avg_score": 0.015137984668155895,
"num_lines": 103
} |
import random
import traceback
from copy import copy
random.seed()
class bracket:
def __init__(self, left, right, victor):
self.left=left
self.right=right
self.victor=victor
self.mapped=False
self.mappedTo=None
self.num=0
self.status=0
self.generation=0
def mapTo(self, target, used=set(), gen=0):
number=0
self.generation=gen
target.generation=gen
while(number in used):
number=random.randint(1,10000)
used=used|set([number])
self.num=number
target.num=number
self.mapped=True
self.mappedTo=target
target.mapped=True
target.mappedTo=self
if(type(self.left)==bracket):
used=used|self.left.mapTo(target.left, used, gen+1)
if(type(self.right)==bracket):
used=used|self.right.mapTo(target.right, used, gen+1)
return used
def nodes(self, indecies=set()):
if(not self.mapped):
return
indecies=indecies|set([self.num])
if(type(self.left)==bracket):
indecies=indecies|self.left.nodes(indecies)
if(type(self.right)==bracket):
indecies=indecies|self.right.nodes(indecies)
return indecies
def dump(self):
clone=copy(self)
indecies=list(self.mapTo(clone))
for item in indecies:
target=clone.fetch(item)
target.leftnum=target.left if(type(target.left)==str) else target.left.num
target.rightnum=target.right if(type(target.right)==str) else target.right.num
target.vicnum=target.victor if(type(target.victor)==str) else target.victor.num
output=""
for item in indecies:
target=clone.fetch(item)
output+=str(target.leftnum)+"::"+str(target.rightnum)+"::"+str(target.vicnum)+"::"+str(item)+"\n"
self.unmap()
return output.rstrip()
def unmap(self):
if(not self.mapped):
return
self.mapped=False
if(type(self.left)==bracket):
self.left.unmap()
if(type(self.right)==bracket):
self.right.unmap()
self.mappedTo.mapped=False
self.mappedTo.mappedTo=None
self.mappedTo=None
def correctness(self, key):
indecies=list(self.mapTo(key))
for item in indecies:
target=self.fetch(item)
## target.status=1 if(target.findVictor()==key.fetch(item).findVictor()) else (0 if(key.fetch(item).findVictor()=="FUTURE") else -1)
target.status = 0 if(key.fetch(item).findVictor()=="FUTURE") else (1 if(target.findVictor()==key.fetch(item).findVictor()) else -1)
for item in indecies:
target=self.fetch(item)
if(target.status==0):
target.status=target.recursiveUpdate()
self.unmap()
clone=copy(self)
indecies=list(self.mapTo(clone))
for item in indecies:
target=clone.fetch(item)
target.leftnum=target.left if(type(target.left)==str) else target.left.num
target.rightnum=target.right if(type(target.right)==str) else target.right.num
target.vicnum=target.victor if(type(target.victor)==str) else target.victor.num
output=""
for item in indecies:
target=clone.fetch(item)
output+=str(target.leftnum)+"::"+str(target.rightnum)+"::"+str(target.vicnum)+"::"+str(target.status)+"::"+str(item)+"\n"
self.unmap()
## print(output)
return output.rstrip()
def recursiveUpdate(self):
if(type(self.victor)==str):
return 0
elif(self.victor.status==0):
return self.victor.recursiveUpdate()
elif(self.victor.status==-1):
return -1
else:
return 0
def ppr(self, key, points):
indecies=list(self.mapTo(key))
for item in indecies:
target=self.fetch(item)
## target.status=1 if(target.findVictor()==key.fetch(item).findVictor()) else (0 if(key.fetch(item).findVictor()=="FUTURE") else -1)
target.status = 0 if(key.fetch(item).findVictor()=="FUTURE") else (1 if(target.findVictor()==key.fetch(item).findVictor()) else -1)
for item in indecies:
target=self.fetch(item)
if(target.status==0):
target.status=target.recursiveUpdate()
pointsRemaining=0
for item in indecies:
target=self.fetch(item)
if(target.status==0):
pointsRemaining+=points[target.generation]
return pointsRemaining
def numCorrect(self, key):
indecies=list(self.mapTo(key))
for item in indecies:
target=self.fetch(item)
## target.status=1 if(target.findVictor()==key.fetch(item).findVictor()) else (0 if(key.fetch(item).findVictor()=="FUTURE") else -1)
target.status = 0 if(key.fetch(item).findVictor()=="FUTURE") else (1 if(target.findVictor()==key.fetch(item).findVictor()) else -1)
## for item in indecies:
## target=self.fetch(item)
## if(target.status==0):
## target.status=target.recursiveUpdate()
total=0
for item in indecies:
target=self.fetch(item)
if(target.status==1):
total+=1
return total
def score(self, key, points):
indecies=list(self.mapTo(key))
total=0
for item in indecies:
if(self.fetch(item).findVictor()==key.fetch(item).findVictor()):
total+=points[self.fetch(item).generation]
self.unmap()
return total
def fetch(self, target):
if(not self.mapped):
return None
if(self.num==target):
return self
if(type(self.left)==bracket):
resLeft=self.left.fetch(target)
else:
resLeft=None
if(type(self.right)==bracket):
resRight=self.right.fetch(target)
else:
resRight=None
if(resLeft!=None):
return resLeft
if(resRight!=None):
return resRight
return None
def __str__(self):
return self.makeString()
def findVictor(self):
if(type(self.victor)==bracket):
return self.victor.findVictor()
return self.victor
def makeString(self, tablevel=0):
output=""
tabs=""
for i in range(tablevel):
tabs+="\t"
if(type(self.right)==bracket):
output+=self.right.makeString(tablevel+1)
else:
output+=tabs+"\t"+self.right
output+="\n"+tabs+"("
output+=""+self.findVictor()+")"
output+="\n"
if(type(self.left)==bracket):
output+=self.left.makeString(tablevel+1)
else:
output+=tabs+"\t"+self.left
return output
def seekNext(self, indecies):
gens=[]
highest=0
for item in indecies:
gen=self.fetch(item).generation
if((gen>highest) and self.fetch(item).victor=="FUTURE"):
highest=gen
for item in indecies:
if(self.fetch(item).generation == highest):
gens.append(item)
return gens
def setNext(self, target):
indecies = list(self.mapTo(target))
nums = self.seekNext(indecies)
for num in nums:
if(target.fetch(num).victor == target.fetch(num).left):
self.fetch(num).victor = self.fetch(num).left
else:
self.fetch(num).victor = self.fetch(num).right
self.unmap()
def assignBracket(nodes, target):
item=nodes[target]
victor=0
if(item.victor.isnumeric()):
if(item.victor==item.left):
victor=-1
elif(item.victor==item.right):
victor=1
if(item.left.isnumeric()):
item.left=assignBracket(nodes, item.left)
if(item.right.isnumeric()):
item.right=assignBracket(nodes, item.right)
if(victor==-1):
item.victor=item.left
elif(victor==1):
item.victor=item.right
return item
def bracketBuilder(source):
proto=source.rstrip().rsplit("\n")
nodes={}
for line in proto:
## print(line)
if(line==""):
print("Skipping blank line")
continue
data=line.rsplit("::")
try:
nodes[data[3]]=bracket(data[0], data[1], data[2])
except IndexError as e:
print(data)
traceback.print_exc()
raise SyntaxError("STOP")
return assignBracket(nodes, "0")
| {
"repo_name": "captianjroot/EquationBrackets",
"path": "bracket.py",
"copies": "1",
"size": "8753",
"license": "mit",
"hash": -4658322482003922000,
"line_mean": 35.3195020747,
"line_max": 143,
"alpha_frac": 0.5673483377,
"autogenerated": false,
"ratio": 3.6854736842105265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9635990047272207,
"avg_score": 0.02336639492766373,
"num_lines": 241
} |
import socket
import os
import time
import threading
import bracket
import weakref
#from switches import switch, case
import random
from datetime import date
lock = (date.today()>date(2014,4,8))
Shutdown=False
clients={}
sessions={}
submitted=set()
version=1.02
pointSystem=[16,8,4,2,1]
master=None
key=None
source=None
current=None
ticking=False
passwordKey="PeNiS"
dataLock=threading.Lock()
def decrypt(msg, key):
chrs=[]
for i in range(len(msg)):
enc_c=ord(msg[i])
key_c=ord(key[i % len(key)])
num = enc_c - key_c
if(num<0):
num+=256
msg_c=chr(num)
chrs.append(msg_c)
return "".join(chrs)
def encrypt(msg, key):
chrs=[]
for i in range(len(msg)):
msg_c=ord(msg[i])
key_c=ord(key[i % len(key)])
enc_c=chr((msg_c + key_c) % 256)
chrs.append(enc_c)
return "".join(chrs)
def newEncrypt(msg, key):
chrs=[]
for i in range(len(msg)):
msg_c=ord(msg[i])
key_c=ord(key[i%len(key)])
chrs.append(str(msg_c+key_c))
return " ".join(chrs)
def passwordTrans(msg, key):
return newEncrypt(decrypt(msg, key), key)
def newDecrypt(msg, key):
intake=msg.rsplit()
if(not intake[1].isnumeric()):
intake=passwordTrans(msg, key).rsplit()
chrs=[]
for i in range(len(intake)):
enc_c=int(intake[i])
key_c=ord(key[i%len(key)])
msg_c=chr(enc_c - key_c)
chrs.append(msg_c)
return "".join(chrs)
def updateClientList(user):
reader=open("clients\\@clientlist.brak")
intake=reader.readlines()
reader.close()
writer=open("clients\\@clientlist.brak", mode='w')
intake.append("\n"+user)
for line in intake:
writer.write(line)
writer.close()
class equation:
def __init__(self, name, equation):
self.name=name
self.eq=equation
class newInterface:
def __init__(self, preset=None):
if(preset==None):
self.sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock=preset
## self.signaler=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def sockListen(self, port=4242):
self.sock.bind(('', port))
self.sock.listen(5)
def sockAccept(self):
return newInterface(preset=self.sock.accept()[0])
def sockConnect(self, address, port=4242):
self.sock.connect((address, port))
def sockSend(self, msg):
msg+="\\\r\n"
totalsent=0
while(totalsent<len(msg)):
sent = self.sock.send(msg[totalsent:].encode())
if(sent==0):
raise RuntimeError("Socket Connection Broken!")
totalsent+=sent
def sockRec(self):
msg=""
self.sock.settimeout(30)
while(len(msg.rsplit('\\'))==1):
intake=self.sock.recv(4096)
## print(intake.decode())
if(intake==b""):
raise RuntimeError("Socket Connection Broken!")
msg+=intake.decode()
self.sock.setblocking(True)
return msg.rsplit('\\')[0]
def sockDisconnect(self):
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
self.sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def sockIP(self):
return self.sock.getpeername()[0]
class Logger:
def __init__(self):
self.out=None
self.started=False
self.writeLock=threading.Lock()
self.stamp=0
def getReference(self):
return weakref.ref(self)
## return self
## def __call__(self):
## return self
def start(self):
if(self.started):
return
print("--LOG STARTED--")
self.stamp=time.time()
self.out=open("serverLog.txt", mode='w')
self.out.write("Bracket Server Log:\n")
self.out.write(time.strftime("%A %B %d <%H:%M>")+"\n")
self.out.flush()
self.started=True
def write(self, string):
self.writeLock.acquire()
if(not self.started):
self.start()
if((time.time()-self.stamp)>=300):
self.out.write("---<TIMESTAMP>---\n")
self.out.write(time.strftime("%A %B %d <%H:%M>")+"\n")
self.stamp=time.time()
self.out.write(string+"\n")
self.out.flush()
self.writeLock.release()
print(string)
def timestamp(self):
self.writeLock.acquire()
if(not self.started):
self.start()
return
self.out.write("---<TIMESTAMP>---\n")
self.out.write(time.strftime("%A %B %d <%H:%M>")+"\n")
self.stamp=time.time()
self.out.flush()
self.writeLock.release()
def close(self):
self.writeLock.acquire()
self.out.close()
self.started=False
print("--LOG CLOSED--")
self.writeLock.release()
class Client:
def __init__(self, name, password, email=""):
if(name.rsplit("::")[0]=="RELOAD" and password=="1948535"):
reader=open("clients\\"+name.rsplit("::")[1]+".brak")
intake=reader.readlines()
reader.close()
self.username=intake[0].rstrip()
self.password=intake[1].rstrip()
self.lastKnownScore=int(intake[2])
self.email=intake[3].rstrip()
else:
self.username=name
self.password=password
self.lastKnownScore=0
self.email=email
if(name!="@admin"):
self.save()
def save(self):
writer = open("clients\\"+self.username+".brak", mode='w')
writer.write(self.username+"\n")
writer.write(self.password+"\n")
writer.write(str(self.lastKnownScore)+"\n")
writer.write(self.email+"\n")
writer.close()
def getCorrect(self):
global submitted
global current
if(self.username in submitted):
reader=open("brackets\\"+self.username+".brak")
brak=bracket.bracketBuilder(reader.read())
reader.close()
return brak.numCorrect(current)
return 0
class echo(threading.Thread):
def __init__(self, interface, logref):
threading.Thread.__init__(self)
self.sock=interface
self.stop=False
self.logref=logref
def run(self):
self.tag="["+str(self.ident)+"]:"
msg=[""]
global Shutdown
global sessions
global ticking
self.logref().write(self.tag+"thread active\n")
while((not self.stop) and msg[0]!="9Z9"):
try:
msg=self.sock.sockRec().rsplit("::")
except socket.timeout:
self.logref().write(self.tag+"client timed out")
self.stop=True
self.sock.sockDisconnect()
continue
except ConnectionResetError:
self.logref().write(self.tag+"Client disconnected")
self.stop=True
continue
self.logref().write(self.tag+"recieved command")
if(msg[0]=="9Z9"):
continue
elif(msg[0]=="LIST"):
self.listBrackets(int(msg[1]))
elif(msg[0]=="OUT"):
self.signout(int(msg[1]))  # session ids are stored as ints (see signin), so convert before the lookup
elif(msg[0]=="SIGNIN"):
self.signin(msg[1], msg[2])
elif(msg[0]=="VIEW"):
self.viewBracket(msg[1])
elif(msg[0]=="BRACKET"):
self.createBracket(int(msg[1]))
elif(msg[0]=="LOCKED"):
self.checkLock()
elif(msg[0]=="KEY"):
self.key()
elif(msg[0]=="SRC"):
self.source()
elif(msg[0]=="LEAD"):
self.leaderboard()
elif(msg[0]=="REG"):
self.register()
elif(msg[0]=="MARCO"):
self.logref().write(self.tag+"POLO!")
self.sock.sockSend("POLO")
elif(msg[0]=="SHUTDOWN" and sessions[int(msg[1])].username=="@admin"):
Shutdown=True
self.stop=True
self.logref().write(self.tag+"Preparing to shut down")
elif(msg[0]=="TICK" and sessions[int(msg[1])].username=="@admin"):
self.tick()
## elif(msg[0]=="UPDATE" and sessions[int(msg[1])].username=="@admin"):
## self.updateServer()
## elif(msg[0]=="PULSE"):
## if(ticking):
## self.sock.sock
else:
self.logref().write(self.tag+"Unknown command: "+str(msg))
self.logref().write(self.tag+"Disconnected")
def signin(self, username, password):
global clients
global sessions
global passwordKey
global dataLock
self.logref().write(self.tag+"Signing in...")
## self.sock.sockSend("RDY")
## authkey=self.sock.sockRec().rsplit("::")
## username=authkey.pop(0).lower()
## password=authkey.pop(0)
if((username in clients) and (clients[username].password==
newEncrypt(password, passwordKey))):
num = random.randint(0,10000)
self.logref().write(self.tag+"Welcome, "+username)
while(num in sessions):
num = random.randint(0, 10000)
dataLock.acquire()
sessions[num]=clients[username]
dataLock.release()
self.logref().write(self.tag+"Assigned session "+str(num))
else:
num= -1
username="BAD"
self.sock.sockSend(str(num)+"::"+username)
def register(self):
global clients
global passwordKey
global dataLock
self.logref().write(self.tag+"Registering...")
self.sock.sockSend("RDY")
username=self.sock.sockRec()
self.logref().write(self.tag+"recieved username "+username)
if(username in clients):
self.sock.sockSend("BAD")
self.logref().write(self.tag+"Username was taken")
return
self.sock.sockSend("GOOD")
password=newEncrypt(self.sock.sockRec(), passwordKey)
email=self.sock.sockRec()
dataLock.acquire()
clients[username]=Client(username, password, email)
updateClientList(username)
self.logref().write(self.tag+"Client "+username+" was registered")
dataLock.release()
def key(self):
global key
self.logref().write(self.tag+"sending key")
## print(key)
self.sock.sockSend(key)
def source(self):
global source
self.logref().write(self.tag+"Sending source")
self.sock.sockSend(source)
def createBracket(self, session):
global sessions
global submitted
global lock
global current
global dataLock
global pointSystem
self.logref().write(self.tag+"Creating new bracket")
if((session not in sessions) or lock):
self.logref().write(self.tag+"DUN")
self.stop=True
self.sock.sockDisconnect()
return
user=sessions[session]
self.logref().write(self.tag+"REC")
self.sock.sockSend("GO")
intake = self.sock.sockRec()
self.logref().write(self.tag+"Aquiring Lock")
dataLock.acquire()
writer = open("brackets\\"+user.username+".brak", mode='w')
writer.write(intake)
writer.close()
submitted= submitted|set([user.username])
writer=open("clients\\@submitted.brak",mode='w')
for thing in list(submitted):
writer.write(thing.rstrip()+'\n')
writer.close()
self.logref().write(self.tag+"Bracket made")
reader=open("brackets\\"+user.username+".brak")
temp = bracket.bracketBuilder(reader.read())
reader.close()
writer = open("brackets\\"+user.username+".brak", mode='w')
writer.write(temp.dump())
writer.close()
sessions[session].lastKnownScore=temp.score(current, pointSystem)
dataLock.release()
self.logref().write(self.tag+"Bracket reformatted")
self.sock.sockSend("GO")
def listBrackets(self, session):
global submitted
global lock
global sessions
self.logref().write(self.tag+"sending user list")
if(session not in sessions):
self.logref().write(self.tag+"Bad session")
## print(session, type(session))
## print(sessions)
self.stop=True
self.sock.sockDisconnect()
return
if((not lock) and sessions[session].username!="@admin"):
if(sessions[session].username in submitted):
self.sock.sockSend(sessions[session].username+"\n")
else:
self.sock.sockSend("@NO AVALIABLE USERS\n")
return
names=""
for user in list(submitted):
names+=user+"\n"
names+="@MasterBracket\n"
self.sock.sockSend(names)
def checkLock(self):
global lock
global version
self.logref().write(self.tag+"Checking lock status")
self.sock.sockSend("LOCKED" if(lock) else("OPEN"))
self.sock.sockSend(str(version))
def leaderboard(self):
global clients
global lock
global submitted
self.logref().write(self.tag+"sending leaderboard")
if(not lock):
self.sock.sockDisconnect()
return
unsorted=[clients[name] for name in list(submitted)]
unsorted.sort(key= lambda x:(x.lastKnownScore)+(x.getCorrect()/100), reverse=True)
## unsorted.reverse()
unsorted=unsorted[0:10]
out="\n".join([user.username+"::"+str(user.lastKnownScore) for user in unsorted])
self.logref().write(self.tag+"Leaders:")
self.logref().write(self.tag+str(out))
self.sock.sockSend(out)
def viewBracket(self, user):
global submitted
global current
global dataLock
global pointSystem
global clients
self.logref().write(self.tag+"viewing bracket")
if(user.rstrip()=="@MasterBracket"):
self.logref().write(self.tag+"Sending current master bracket")
self.sock.sockSend("0")
self.sock.sockSend("0")
self.sock.sockSend(current.correctness(current))
return
elif(user not in submitted):
self.logref().write(self.tag+"BAD")
self.stop=True
self.sock.sockDisconnect()
return
dataLock.acquire()
reader = open("brackets\\"+user+".brak")
intake=reader.read()
reader.close()
dataLock.release()
outBracket= bracket.bracketBuilder(intake)
self.logref().write(self.tag+"Updating score of requested bracket")
outScore=outBracket.score(current, pointSystem)
self.sock.sockSend(str(outScore))
self.sock.sockSend(str(outBracket.ppr(current, pointSystem)))
output=outBracket.correctness(current)
dataLock.acquire()
clients[user].lastKnownScore=outScore
clients[user].save()
dataLock.release()
self.sock.sockSend(output)
def signout(self, session):
global sessions
global dataLock
self.logref().write(self.tag+"signing out")
if(session not in sessions):
self.stop=True
self.sock.sockDisconnect()
return
dataLock.acquire()
sessions.pop(session, None)  # sessions is a dict keyed by session id; set subtraction would raise TypeError
dataLock.release()
## self.sock.sockSend("GOOD")
def tick(self):
global master
global current
global dataLock
global submitted
global ticking
global clients
global pointSystem
global lock
self.logref().write(self.tag+"Tick active. Acquiring lock!")
dataLock.acquire()
ticking=True
self.logref().write(self.tag+"Updating @current.brak")
current.setNext(master)
writer = open("brackets\\@current.brak", mode='w')
writer.write(current.dump())
writer.close()
self.logref().write(self.tag+"@current.brak updated. Rescoring users")
for user in list(submitted):
reader = open("brackets\\"+user+".brak")
brak = bracket.bracketBuilder(reader.read())
reader.close()
clients[user].lastKnownScore=brak.score(current, pointSystem)
clients[user].save()
self.logref().write(self.tag+"finished scoring "+str(user))
## self.logref().write(self.tag+"Updating time lock")
## lock = (date.today()>=date(2014,4,8))
## self.logref().write(self.tag+"Time Lock status: "+ str(lock))
dataLock.release()
ticking=False
self.sock.sockSend("DONE")
self.logref().write(self.tag+"Tick complete. lock released")
serverLog = Logger()
reader=open("clients\\@clientlist.brak")
clientNames=reader.readlines()
reader.close()
for name in clientNames:
clients[name.rstrip()]=Client("RELOAD::"+name.rstrip(), "1948535")
reader=open("clients\\@submitted.brak")
submittedNames=reader.readlines()
reader.close()
submitted=set([name.rstrip() for name in submittedNames])
reader=open("brackets\\@master.brak")
masterSource=reader.read()
reader.close()
master=bracket.bracketBuilder(masterSource)
reader=open("brackets\\@current.brak")
currentSource=reader.read()
reader.close()
current=bracket.bracketBuilder(currentSource)
reader=open("brackets\\@key.brak")
key="".join(reader.readlines())
reader.close()
reader=open("brackets\\@source.brak")
source="".join(reader.readlines())
reader.close()
clients["@admin"]=Client("@admin", newEncrypt("BRACKETS_ADMIN!", passwordKey), "none@none.com")
primary=newInterface()
primary.sockListen(4242)
socks=[]
sockSize=0
serverLog.write("Startup complete. Server Active")
while(not Shutdown):
socks.append(echo(primary.sockAccept(), serverLog.getReference()))
socks[-1].start()
sockSize+=1
serverLog.write("New client connected")
if(sockSize>=10):
serverLog.write("Checking for dead threads")
removedSocks=0
for i in range(len(socks)):
if(socks[i]==None):
continue
if(not socks[i].is_alive()):
socks[i].join()
socks[i]=None
removedSocks+=1
sockSize-=removedSocks
serverLog.write("\nRemoved "+str(removedSocks)+" dead threads")
serverLog.write("Joining threads")
for thing in socks:
if(thing!=None):
thing.join()
primary.sock.close()
serverLog.write("FINISHED")
serverLog.timestamp()
serverLog.close()
| {
"repo_name": "captianjroot/EquationBrackets",
"path": "bracketServer.py",
"copies": "1",
"size": "18913",
"license": "mit",
"hash": 336747781909883400,
"line_mean": 33.0161870504,
"line_max": 95,
"alpha_frac": 0.5691852165,
"autogenerated": false,
"ratio": 3.7645302547770703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.974015080242014,
"avg_score": 0.018712933771386102,
"num_lines": 556
} |
__author__ = 'Aaron Hosford'
__all__ = [
'ScoringMeasure',
]
class ScoringMeasure:
"""A generalized categorization of a parse tree node within its parse
tree which acts as a key for storing/retrieving scores and their
accuracies. Roughly analogous to XCS (Accuracy-based Classifier System)
rules."""
def __init__(self, value):
self._value = value
self._hash = hash(value)
def __hash__(self):
return self._hash
def __eq__(self, other):
if not isinstance(other, ScoringMeasure):
return NotImplemented
return self._hash == other._hash and self._value == other._value
def __ne__(self, other):
if not isinstance(other, ScoringMeasure):
return NotImplemented
return self._hash != other._hash or self._value != other._value
def __str__(self):
return str(self._value)
def __repr__(self):
return type(self).__name__ + "(" + repr(self._value) + ")"
@property
def value(self):
return self._value
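# --- Illustrative usage sketch (not part of the original module) ---
# ScoringMeasure hashes and compares by its wrapped value, so equal measures act as the
# same dictionary key, which is how scores and their accuracies can be stored against
# them. The measure value and numbers below are made up for the example.
if __name__ == '__main__':
    scores = {}
    scores[ScoringMeasure(('NP', 'head_noun'))] = (0.75, 0.9)
    assert ScoringMeasure(('NP', 'head_noun')) in scores
    assert scores[ScoringMeasure(('NP', 'head_noun'))] == (0.75, 0.9)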
| {
"repo_name": "hosford42/pyramids",
"path": "pyramids/scoring.py",
"copies": "1",
"size": "1050",
"license": "mit",
"hash": -1750060305961810700,
"line_mean": 26.6315789474,
"line_max": 75,
"alpha_frac": 0.6,
"autogenerated": false,
"ratio": 3.9923954372623576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5092395437262358,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aaron Hosford'
__all__ = [
'Timeout',
'GrammarParserError',
'GrammarSyntaxError',
]
class Timeout(Exception):
pass
class GrammarParserError(Exception):
def __init__(self, msg=None, filename=None, lineno=1, offset=1,
text=None):
super().__init__(msg, (filename, lineno, offset, text))
self.msg = msg
self.args = (msg, (filename, lineno, offset, text))
self.filename = filename
self.lineno = lineno
self.offset = offset
self.text = text
def __repr__(self):
return (
type(self).__name__ +
repr(
(
self.msg,
(self.filename, self.lineno, self.offset, self.text)
)
)
)
def set_info(self, filename=None, lineno=None, offset=None, text=None):
if filename is not None:
self.filename = filename
if lineno is not None:
self.lineno = lineno
if offset is not None:
self.offset = offset
if text is not None:
self.text = text
self.args = (
self.msg,
(self.filename, self.lineno, self.offset, self.text)
)
class GrammarSyntaxError(GrammarParserError, SyntaxError):
def __init__(self, msg, filename=None, lineno=1, offset=1, text=None):
        # pass the location fields individually so GrammarParserError does not receive
        # the whole tuple as its filename argument
        super().__init__(msg, filename, lineno, offset, text)
def __repr__(self):
return super(GrammarParserError, self).__repr__()
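# --- Illustrative usage sketch (not part of the original module) ---
# A grammar loader can raise GrammarParserError before the offending location is known and
# attach it later with set_info(); the file name, line number and rule text below are made up.
if __name__ == '__main__':
    try:
        error = GrammarParserError("unexpected token")
        error.set_info(filename="example.grammar", lineno=12, offset=3, text="NP -> ???")
        raise error
    except GrammarParserError as caught:
        assert caught.lineno == 12 and caught.filename == "example.grammar"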
| {
"repo_name": "hosford42/pyramids",
"path": "pyramids/exceptions.py",
"copies": "1",
"size": "1541",
"license": "mit",
"hash": 7860788195168563000,
"line_mean": 25.5689655172,
"line_max": 75,
"alpha_frac": 0.5340687865,
"autogenerated": false,
"ratio": 4.120320855614973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5154389642114973,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aaron Hosford'
import logging
import unittest
import xcs
import xcs.bitstrings as bitstrings
import xcs.scenarios
class TestBitString(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.use_numpy = False
def run(self, result=None):
if bitstrings.numpy_is_available():
self.use_numpy = True
super().run(result)
self.use_numpy = False
super().run(result)
def setUp(self):
if self.use_numpy:
bitstrings.use_numpy()
else:
bitstrings.use_pure_python()
self.bitstring = bitstrings.BitString('10010101') # 149
def test_using(self):
if self.use_numpy:
self.assertTrue(bitstrings.using_numpy())
self.assertTrue('numpy' in bitstrings.BitString.__module__)
bitstrings.use_pure_python()
self.assertFalse(bitstrings.using_numpy())
self.assertTrue('python' in bitstrings.BitString.__module__)
bitstrings.use_numpy()
self.assertTrue(bitstrings.using_numpy())
self.assertTrue('numpy' in bitstrings.BitString.__module__)
logging.disable(logging.CRITICAL)
try:
xcs.test(scenario=xcs.scenarios.MUXProblem(1000))
finally:
logging.disable(logging.NOTSET)
else:
self.assertFalse(bitstrings.using_numpy())
self.assertTrue('python' in bitstrings.BitString.__module__)
logging.disable(logging.CRITICAL)
try:
xcs.test(scenario=xcs.scenarios.MUXProblem(1000))
finally:
logging.disable(logging.NOTSET)
def test_from_int(self):
bitstring = bitstrings.BitString(149, 8)
self.assertTrue(self.bitstring == bitstring)
def test_from_string(self):
self.assertTrue(
bitstrings.BitString(str(self.bitstring)) == self.bitstring
)
def test_random(self):
previous = bitstrings.BitString.random(len(self.bitstring), .5)
self.assertEqual(len(previous), len(self.bitstring))
for i in range(10):
current = bitstrings.BitString.random(
len(self.bitstring),
1 / (i + 2)
)
self.assertEqual(len(current), len(self.bitstring))
if previous != current:
break
previous = current
else:
self.fail("Failed to produce distinct random bitstrings.")
def test_crossover_template(self):
previous = bitstrings.BitString.crossover_template(
len(self.bitstring),
2
)
self.assertEqual(len(previous), len(self.bitstring))
for i in range(10):
current = bitstrings.BitString.crossover_template(
len(self.bitstring),
i + 1
)
self.assertEqual(len(current), len(self.bitstring))
if previous != current:
break
previous = current
else:
self.fail("Failed to produce distinct crossover templates.")
def test_any(self):
self.assertTrue(self.bitstring.any())
self.assertFalse(
bitstrings.BitString(0, len(self.bitstring)).any()
)
self.assertTrue(
bitstrings.BitString(-1, len(self.bitstring)).any()
)
def test_count(self):
self.assertTrue(self.bitstring.count() == 4)
self.assertTrue(
bitstrings.BitString(0, len(self.bitstring)).count() == 0
)
self.assertTrue(
bitstrings.BitString(-1, len(self.bitstring)).count() ==
len(self.bitstring)
)
def test_and(self):
self.assertEqual(self.bitstring, self.bitstring & self.bitstring)
self.assertEqual(
self.bitstring & ~self.bitstring,
bitstrings.BitString([0] * len(self.bitstring))
)
def test_or(self):
self.assertEqual(self.bitstring, self.bitstring | self.bitstring)
self.assertEqual(
self.bitstring | ~self.bitstring,
bitstrings.BitString([1] * len(self.bitstring))
)
def test_xor(self):
mask = bitstrings.BitString.random(len(self.bitstring))
self.assertEqual(
mask ^ mask,
bitstrings.BitString([0] * len(self.bitstring))
)
self.assertEqual(self.bitstring, (self.bitstring ^ mask) ^ mask)
def test_invert(self):
self.assertNotEqual(self.bitstring, ~self.bitstring)
self.assertEqual(self.bitstring, ~~self.bitstring)
def test_plus(self):
self.assertEqual(
self.bitstring + ~self.bitstring,
bitstrings.BitString(list(self.bitstring) +
list(~self.bitstring))
)
def test_slice(self):
self.assertEqual(self.bitstring, self.bitstring[:])
self.assertEqual(self.bitstring,
self.bitstring[:2] + self.bitstring[2:])
self.assertEqual(self.bitstring,
self.bitstring[-len(self.bitstring):])
self.assertEqual(self.bitstring,
self.bitstring[:len(self.bitstring)])
self.assertEqual(
self.bitstring,
self.bitstring[0:3] + self.bitstring[3:len(self.bitstring)]
)
def test_index(self):
self.assertEqual(
list(self.bitstring),
[self.bitstring[index] for index in range(len(self.bitstring))]
)
self.assertEqual(
list(self.bitstring),
[self.bitstring[index]
for index in range(-len(self.bitstring), 0)]
)
def main():
unittest.main()
if __name__ == "__main__":
main()
| {
"repo_name": "hosford42/xcs",
"path": "tests/test_bitString.py",
"copies": "1",
"size": "5863",
"license": "bsd-3-clause",
"hash": 6341346287876807000,
"line_mean": 31.7541899441,
"line_max": 75,
"alpha_frac": 0.5701859116,
"autogenerated": false,
"ratio": 4.051831375259157,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5122017286859157,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aaron Hosford'
import unittest
from xcs.bitstrings import BitString
from xcs.scenarios import HaystackProblem
class TestHaystackProblem(unittest.TestCase):
def setUp(self):
self.scenario = HaystackProblem(10)
def test_get_possible_actions(self):
actions = self.scenario.get_possible_actions()
self.assertTrue(len(actions) == 2)
self.assertTrue(True in actions)
self.assertTrue(False in actions)
def test_sense(self):
previous = self.scenario.sense()
self.assertIsInstance(previous, BitString)
self.assertTrue(len(previous) == self.scenario.input_size)
while self.scenario.more():
current = self.scenario.sense()
self.assertIsInstance(current, BitString)
self.assertTrue(len(current) == self.scenario.input_size)
if current != previous:
break
else:
self.fail("All situations are the same.")
def test_execute(self):
situation = self.scenario.sense()
value = situation[self.scenario.needle_index]
self.assertEqual(1, self.scenario.execute(value))
self.assertEqual(0, self.scenario.execute(not value))
def test_more(self):
self.scenario.reset()
for _ in range(self.scenario.initial_training_cycles):
self.scenario.sense()
self.assertTrue(self.scenario.more())
self.scenario.execute(False)
self.assertFalse(self.scenario.more())
def main():
unittest.main()
if __name__ == "__main__":
main()
| {
"repo_name": "hosford42/xcs",
"path": "tests/test_haystackProblem.py",
"copies": "1",
"size": "1588",
"license": "bsd-3-clause",
"hash": -3121617109990896000,
"line_mean": 29.5384615385,
"line_max": 69,
"alpha_frac": 0.6353904282,
"autogenerated": false,
"ratio": 4.061381074168798,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007122507122507122,
"num_lines": 52
} |
__author__ = "Aaron Huus"
__email__ = "huus@thehumangeo.com"
from random import choice
import sys
import json
import codecs
import glob
import wikipedia
import os.path
from BeautifulSoup import BeautifulSoup
import zipfile
import ntpath
fridge_words = ["service", "backend", "cache", "miss", "database", "op", "monitor", "data", "availability",
"scalability", "not", "ready", "for", "app", "launch", "the", "in", "everything",
"hit", "hack", "cloud", "a", "enterprise", "oriented", "system", "critical", "unicorn", "host", "latency",
"with", "micro", "performance", "distributed", "need", "don't", "get", "high", "run", "like", "time",
"over", "big", "mobile", "frontend", "responsive", "but", "always", "ironic","web", "still", "so", "boring",
"open", "I'm", "framework", "it", "low", "yet", "you", "only", "what", "which", "out", "social", "network", "."]
def get_model(requested_order):
if isinstance(requested_order, int):
requested_order = str(requested_order)
if os.path.isfile("models.json"):
result = read_model_from_disk("models.json")
return result["models"][requested_order]["model"]
information = get_wikipedia_articles(fridge_words)
#information = read_model_from_disk("wikipedida_articles.json")
write_model_to_disk(information, "wiki_articles.txt")
result = {
"models" : {},
}
for order in range(1,6):
word_count, model = generate_model(order, information)
result["models"][order] = {}
result["models"][order]["fragments"] = len(model.keys())
result["models"][order]["model"] = model
print "Generated model for order %s with %s fragments and %s examples" % (order, len(model.keys()), word_count)
result["models"][order]["examples"] = word_count
write_model_to_disk(result,"models.json")
return result["models"][requested_order]["model"]
def generate_model(order, articles, model=None, word_count=0):
    if model is None:
        # start from an empty model when the caller does not supply one
        model = {}
    for article in articles:
for word in range(0,len(article) - order):
fragment = strip_word(article[word])
for j in range(1,order):
fragment.extend(strip_word(article[word+j]))
next_word = article[word + order]
if not on_fridge(fragment, next_word):
continue
fragment = ' '.join(fragment)
if fragment not in model:
model[fragment] = {}
if next_word not in model[fragment]:
model[fragment][next_word] = 1
else:
model[fragment][next_word] += 1
word_count += 1
return word_count, model
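# Illustrative sketch (not part of the original script): generate_model maps each
# order-word fragment to a dict of next-word counts, so for order 2 a (made up)
# entry could look like the following, and get_next_word samples a continuation
# from those counts.
#
#     model = {
#         "the cloud": {"service": 2, "database": 1},
#     }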
def add_word_to_model():
    # incomplete stub in the original source; made a no-op so the module parses
    pass
def strip_word(word):
return ["".join(c for c in word.strip().lower() if c not in ('!',',',';',':'))]
def on_fridge(fragment, next_word):
_fridge_words = list(fridge_words)
if fragment[0] == ".":
return False
for word in fragment:
if word not in _fridge_words:
return False
_fridge_words.remove(word)
if next_word not in _fridge_words:
return False
_fridge_words.remove(next_word)
return True
def get_wikipedia_articles(search_words):
articles = []
for count, word in enumerate(search_words):
results = wikipedia.search(word, results=75)
for result in results:
try:
article = wikipedia.page(result)
except:
print "Skipping disambiguation page for %s ..." % result
continue
articles.append(article.content.replace("."," . ").split())
print "Read %s articles about the term: \"%s\"" % (len(results), word)
percent_complete = float((count+1)) / float(len(search_words)) * 100
print "Completed %s terms: %f percent\n" % (count+1, percent_complete)
return articles
def get_words_from_books(directory):
books = []
for book in glob.glob(directory):
words = get_words_from_utf8_file(book)
books.append(words)
return books
def get_words_from_utf8_file(book_path):
print book_path
with codecs.open(book_path, encoding='utf-8') as book:
BOM = codecs.BOM_UTF8.decode('utf8')
words = []
for line in book:
print line
words.extend(line.lstrip(BOM).split())
print "Read %s" % os.path.basename(book_path)
return words
def get_words_from_file(book_path):
print book_path
with codecs.open(book_path) as book:
words = []
for line in book:
print line
words.extend(line.split())
print "Read %s" % os.path.basename(book_path)
return words
def get_words_from_zip2(zip_path):
base_path , filename = ntpath.split(zip_path)
book_base = filename.split(".")[0]
book_name = book_base + ".txt"
download_path = "/Users/Aaron/Downloads/"
book_path = download_path + book_name
zfile = zipfile.ZipFile(zip_path)
zfile.extractall(download_path)
if not os.path.isfile(book_path):
book_path = book_path.replace("_", "-")
if not os.path.isfile(book_path):
book_path = download_path + book_base.split("_")[0] + "/" + book_name
if not os.path.isfile(book_path):
book_path = download_path + book_base.split("_")[0] + "/" + book_name.replace("_","-")
words = get_words_from_file(book_path)
os.remove(book_path)
return words
def get_words_from_zip(zip_path):
words = []
try:
zfile = zipfile.ZipFile(zip_path)
except:
print "Error opening %s" % zip_path
for finfo in zfile.infolist():
try:
book = zfile.open(finfo)
except:
print "Cannot open %s" % finfo.filename
return []
if finfo.filename.endswith('.txt'):
for line in book:
words.extend(line.replace("."," . ").split())
return words
def update_model_from_gutenberg(iso_mount_point):
book_count = 0
for page_path in glob.glob(iso_mount_point + "/indexes/AUTHORS_*.HTML"):
print "Reading all books from %s" % page_path
books=[]
with open(page_path, 'r') as page:
data = page.read()
soup = BeautifulSoup(data)
book_links = soup.findAll('a')
for book_link in book_links:
if ("etext" in book_link['href']) and ("(English)" in book_link.parent.text):
base_path , filename = ntpath.split(page_path)
book_page_path = base_path + "/" + book_link['href']
with open(book_page_path, 'r') as book_page:
book_page_data = book_page.read()
soup2 = BeautifulSoup(book_page_data)
book_path = soup2.findAll('table')[1].findAll('td')[1].find('a')
book_zip_path = base_path + "/" + book_path['href']
if book_zip_path.endswith(".zip"):
words = get_words_from_zip(book_zip_path)
else:
continue
books.append(words)
book_count = book_count + 1
if book_count % 100 == 0:
print "Read %s books" % book_count
update_model(books)
def generate_sentence(order, words_in_a_sentence, number_of_sentences, seed_original=None):
model = get_model(order)
for loop in range(0, number_of_sentences):
sentence = []
while len(sentence) < words_in_a_sentence:
_fridge_words = list(fridge_words)
if not seed_original:
first_word = choice(fridge_words)
while first_word == ".":
first_word = choice(fridge_words)
seed = [first_word]
for seed_number in range(1, order):
seed_word = get_next_word(get_model(1), seed[-1])
if seed_word and (seed_word in _fridge_words):
seed.append(seed_word)
else:
break
else:
seed = seed_original
current_fragment = seed[0:order]
for word in current_fragment:
try:
_fridge_words.remove(word)
except:
pass
sentence = list(current_fragment)
for i in range(0, words_in_a_sentence - order):
next_word = get_next_word(model, current_fragment)
if next_word and (next_word in _fridge_words):
sentence.append(next_word)
_fridge_words.remove(next_word)
current_fragment = list(sentence[order*-1:])
else:
break
#print ' '.join(sentence)
print ' '.join(sentence)
def get_next_word(model, fragment):
words = []
if isinstance(fragment, list):
fragment = ' '.join(fragment)
if fragment not in model.keys():
return None
for word in model[fragment].keys():
for count in range(0, model[fragment][word]):
words.append(word)
    # pick from the frequency-weighted list so common continuations are favored
    return choice(words)
def update_model(information):
print "Updating model ..."
result = read_model_from_disk("models.json")
for order in range(1,6):
word_count, model = generate_model(order, information, get_model(order), result["models"][str(order)]["examples"])
result["models"][str(order)]["fragments"] = len(model.keys())
result["models"][str(order)]["model"] = model
print "Updated model for order %s with %s fragments and %s examples" % (order, len(model.keys()), word_count)
result["models"][str(order)]["examples"] = word_count
write_model_to_disk(result,"models.json")
def write_model_to_disk(model, filename):
with open(filename, 'wb') as f:
json.dump(model, f)
def read_model_from_disk(filename):
with open(filename, 'rb') as f:
model = json.load(f)
return model
if __name__ == "__main__":
order = int(sys.argv[1])
words_in_a_sentence = int(sys.argv[2])
number_of_sentences = int(sys.argv[3])
#information = get_words_from_books("books/*.txt")
#search_words = ["computer science", "software engineering", "website", "internet", "software", "java", "python", "geography",
#"private network", "intranet", "scale", "delay", "tcp/ip", "facebook", "twitter"]
#information = get_wikipedia_articles(search_words)
update_model_from_gutenberg("/Volumes/PGDVD_2010_04_RC2")
#generate_sentence(order, words_in_a_sentence, number_of_sentences)
| {
"repo_name": "ahuus1/talking-fridge",
"path": "speak.py",
"copies": "1",
"size": "10796",
"license": "mit",
"hash": -2421137960596540400,
"line_mean": 38.401459854,
"line_max": 130,
"alpha_frac": 0.5614116339,
"autogenerated": false,
"ratio": 3.755130434782609,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4816542068682609,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aaron J. Masino'
from sklearn import metrics
import numpy as np
def negpv(y_actual, y_predicted):
'''calculates the negative predictive value as tn/(tn+fn), where tn = true predicted negatives
fn = false predicted negatives
    assumes negative cases are indicated by 0 and positive cases by 1'''
ya = np.array(y_actual)
yp = np.array(y_predicted)
ti = np.where(ya==0) #find indices for actual negatives
if len(ti[0])==0: return -1 #no actual negatives
tn = 0
neg_calls = 0
for idx in range(len(ya)):
if ya[idx]==0 and yp[idx]==0: tn +=1
if yp[idx] == 0: neg_calls += 1
#tn = len(ti[0]) - np.sum(yp[ti])
if neg_calls>0: return tn/float(neg_calls)
else: return np.nan
#return tn/float(len(np.where(yp==0)[0]))
def specificity(y_actual, y_predicted):
'''calculates specificity as #predicted true neg / #actual negatives
assumed that negative cases are indicated as 0 and positive cases indicated by 1'''
ya = np.array(y_actual)
yp = np.array(y_predicted)
ti = np.where(ya==0) #find indices for actual negatives
if len(ti[0])==0: return -1 #no actual negatives
#of true negatives is total negatives (ti), minus false positives (sum(yp[ti], i.e. count of non-zero values
#in yp at the indices where ya is negative
#true neg = len(ti) - sum(yp[ti])
#spec = true_neg/len(ti)
return 1 - np.sum(yp[ti])/float(len(ti[0]))
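# Illustrative sketch (not part of the original module): with the 0/1 label
# convention used above, negpv and specificity reduce to simple counts.
# The toy labels below are made up.
#
#     y_actual    = [0, 0, 1, 1, 0]
#     y_predicted = [0, 1, 1, 0, 0]
#     negpv(y_actual, y_predicted)        # -> 2/3: true negatives / negative calls
#     specificity(y_actual, y_predicted)  # -> 2/3: true negatives / actual negatives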
class PerformanceMetrics:
def __init__(self, y_actual, y_predicted):
#ensure we're using ndarray
ya = np.array(y_actual)
yp = np.array(y_predicted)
self.f1 = metrics.f1_score(ya, yp)
self.precision = metrics.precision_score(ya, yp)
self.recall = metrics.recall_score(ya, yp)
self.ppv = self.precision
self.npv = negpv(ya,yp)
self.sensitivity = self.recall
self.specificity = specificity(ya, yp)
self.pred_pos = np.sum(yp)
self.act_pos = np.sum(ya)
self.pred_neg = len(yp) - self.pred_pos
self.act_neg = len(ya) - self.act_pos
self.accuracy = len(np.where(ya==yp)[0])/float(len(ya))
class KFoldPerformanceMetrics:
def __init__(self, pms):
'''given a collection of PerformanceMetric objects, this object
creates means and standard deviations for each metric'''
self.pms = pms
self.f1_mean = np.mean([pm.f1 for pm in pms])
self.f1_std = np.std([pm.f1 for pm in pms])
self.precision_mean = np.mean([pm.precision for pm in pms])
self.precision_std = np.std([pm.precision for pm in pms])
self.recall_mean = np.mean([pm.recall for pm in pms])
self.recall_std = np.std([pm.recall for pm in pms])
self.ppv_mean = np.mean([pm.ppv for pm in pms])
self.ppv_std = np.std([pm.ppv for pm in pms])
self.npv_mean = np.mean([pm.npv for pm in pms])
self.npv_std = np.std([pm.npv for pm in pms])
self.sensitivity_mean = np.mean([pm.sensitivity for pm in pms])
self.sensitivity_std = np.std([pm.sensitivity for pm in pms])
self.specificity_mean = np.mean([pm.specificity for pm in pms])
self.specificity_std = np.std([pm.specificity for pm in pms])
self.accuracy_mean = np.mean([pm.accuracy for pm in pms])
self.accuracy_std = np.std([pm.accuracy for pm in pms])
def printPerformanceMetrics(y_actual, y_predicted):
'''precision = tp / (tp + fp) where tp is number of true positives in the predicted labels, and fp is the number of
false positives in the predicted labels
    recall = tp / (tp + fn) where tp is the same as in precision, and fn is the number of false negatives in the predicted labels
f1 = 2 * (precision * recall) / (precision + recall)'''
pm = PerformanceMetrics(y_actual, y_predicted)
print('Accuracy:\t\t\t{0}'.format(pm.accuracy))
print('F1-Score:\t\t\t{0}'.format(pm.f1))
print('PPV/Precision tp/pp:\t\t{0}'.format(pm.ppv))
print('NPV tn/pn:\t\t\t{0}'.format(pm.npv))
print('Sensitivity/Recall tp/[tp+fn]:\t{0}'.format(pm.recall))
print('Specificity tn/[tn+fp]:\t\t{0}'.format(pm.specificity))
return pm
def printKFoldPerformanceMetrics(performanceMetrics):
pm = KFoldPerformanceMetrics(performanceMetrics)
print('Accuracy:\t\t\t{0}'.format(pm.accuracy_mean))
print('F1-Score:\t\t\t{0}'.format(pm.f1_mean))
print('PPV/Precision tp/pp:\t\t{0}'.format(pm.ppv_mean))
print('NPV tn/pn:\t\t\t{0}'.format(pm.npv_mean))
print('Sensitivity/Recall tp/[tp+fn]:\t{0}'.format(pm.recall_mean))
print('Specificity tn/[tn+fp]:\t\t{0}'.format(pm.specificity_mean))
return pm
def learningCurves(predictor, X_train, y_train, X_other, y_other, minI=0):
trainErr = []
cvErr = []
for i in range(minI,len(y_train)):
predictor.fit(X_train[0:i], y_train[0:i])
trainErr.append(1-predictor.score(X_train[0:i], y_train[0:i]))
cvErr.append(1-predictor.score(X_other, y_other))
return(trainErr, cvErr)
def findKFoldMax(pmDict, metric_id='f1', rule_out_uniform=True):
'''finds the best result in pmDict based on the metric_id.
If rule_out_uniform is True, the selection rules out any choice
that predicts all positive or all negative'''
max_key = 0
max_metric = 0
means = {}
for key in pmDict.keys():
#pms = pmDict[key]
#is_uniform = rule_out_uniform
#if is_uniform:
# for pm in pms:
# total_cases = pm.act_pos + pm.act_neg
# is_uniform = is_uniform and (pm.pred_pos == total_cases or pm.pred_neg == total_cases)
#if is_uniform: print 'WARNING: key={0} ruled out due to uniformity'.format(key)
temp = None
if metric_id == 'f1':
temp = [pm.f1 for pm in pmDict[key]]
temp = np.mean(temp)
if temp>max_metric:
max_metric = temp
max_key = key
means[key]=temp
return (max_metric, max_key, means)
| {
"repo_name": "chop-dbhi/arrc",
"path": "learn/metrics.py",
"copies": "1",
"size": "5990",
"license": "mit",
"hash": -6672611512489178000,
"line_mean": 43.7014925373,
"line_max": 122,
"alpha_frac": 0.6280467446,
"autogenerated": false,
"ratio": 3.0639386189258313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4191985363525831,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aaron J. Masino'
import numpy as np
from learn.metrics import PerformanceMetrics
import sklearn
def printsf(text, out_file=None, fmode = 'a', print_to_screen=True, carriage_returns=1):
if print_to_screen: print(text)
if out_file:
with open(out_file, fmode) as f:
f.write(text)
for idx in range(carriage_returns): f.write('\n')
def printsfPerformanceMetrics(pm, out_file_path, print_to_screen=True, carriage_returns=1):
'''precision = tp / (tp + fp) where tp is number of true positives in the predicted labels, and fp is the number of
false positives in the predicted labels
    recall = tp / (tp + fn) where tp is the same as in precision, and fn is the number of false negatives in the predicted labels
f1 = 2 * (precision * recall) / (precision + recall)'''
printsf('Accuracy:\t\t\t{0:.4f}'.format(pm.accuracy),out_file_path,print_to_screen=print_to_screen)
printsf('F1-Score:\t\t\t{0:.4f}'.format(pm.f1),out_file_path,print_to_screen=print_to_screen)
printsf('PPV/Precision tp/pp:\t\t{0:.4f}'.format(pm.ppv),out_file_path,print_to_screen=print_to_screen)
printsf('NPV tn/pn:\t\t\t{0:.4f}'.format(pm.npv),out_file_path,print_to_screen=print_to_screen)
printsf('Sensitivity/Recall tp/[tp+fn]:\t{0:.4f}'.format(pm.recall),out_file_path,print_to_screen=print_to_screen)
printsf('Specificity tn/[tn+fp]:\t\t{0:.4f}'.format(pm.specificity),out_file_path,print_to_screen=print_to_screen,carriage_returns=2)
def printTwoClassConfusion(cm, out_file_path, print_to_screen=True):
printsf('\n\tConfusion Matrix', out_file_path,print_to_screen=print_to_screen)
printsf('A \t Predicted', out_file_path,print_to_screen=print_to_screen)
printsf('c \t Normal\tAbnormal', out_file_path,print_to_screen=print_to_screen)
printsf('t Normal {0}\t{1}'.format(cm[0,0],cm[0,1]), out_file_path,print_to_screen=print_to_screen)
printsf('u Abnormal {0}\t{1}'.format(cm[1,0],cm[1,1]), out_file_path,print_to_screen=print_to_screen)
printsf('a ', out_file_path,print_to_screen=print_to_screen)
printsf('l ', out_file_path,print_to_screen=print_to_screen)
def print_data_stats(train_labels, test_labels,heading, f):
tc = len(train_labels)
ac=np.sum(train_labels==1)
nc=np.sum(train_labels==0)
printsf(heading, f)
printsf('Total Training Cases:\t\t{0}'.format(tc),f)
printsf('Abnormal Training Cases:\t{0}, {1:.2f}%'.format(ac, ac/float(tc)*100),f)
printsf('Normal Training Cases:\t\t{0}, {1:.2f}%'.format(nc, nc/float(tc)*100),f)
tc = len(test_labels)
ac=np.sum(test_labels==1)
nc=np.sum(test_labels==0)
printsf('\nTotal Test Cases:\t{0}'.format(tc),f)
printsf('Abnormal Test Cases:\t{0}, {1:.2f}%'.format(ac, ac/float(tc)*100),f)
printsf('Normal Test Cases:\t{0}, {1:.2f}%'.format(nc, nc/float(tc)*100),f)
def print_grid_search_results(grid_search, name, out_file, test_input, test_labels):
#print results
printsf('\n{0} {1} Grid Search Results {0}'.format(30*'-',name),out_file)
printsf('Best score: %0.3f' % grid_search.best_score_, out_file)
printsf('Best parameter set:',out_file)
best_params = grid_search.best_estimator_.get_params()
for param_name in sorted(best_params.keys()):
printsf("\t%s: %r" % (param_name, best_params[param_name]), out_file)
#evaluate on test data
best_classifier = grid_search.best_estimator_
y_test_predicted = best_classifier.predict(test_input)
printsf('\nTest Data Performance with Best Estimator Parameters\n',out_file)
pm = PerformanceMetrics(test_labels, y_test_predicted)
printsfPerformanceMetrics(pm, out_file)
#print confusion matrix
cm = sklearn.metrics.confusion_matrix(test_labels,y_test_predicted)
printTwoClassConfusion(cm, out_file)
return pm
| {
"repo_name": "chop-dbhi/arrc",
"path": "learn/printers.py",
"copies": "1",
"size": "3827",
"license": "mit",
"hash": 3831240497916179500,
"line_mean": 53.6714285714,
"line_max": 137,
"alpha_frac": 0.6814737392,
"autogenerated": false,
"ratio": 2.8369162342475907,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40183899734475903,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aaron J. Masino'
import numpy as np
from numpy.random import RandomState
import pandas as pd
from learn import printers, wrangle
from learn.metrics import PerformanceMetrics
import sklearn
from functools import reduce
def load_report(path):
f = open(path,'r')
text = reduce(lambda x,y: x+y, f.readlines(), "")
f.close()
return text
if __name__ == '__main__':
keyword_file = './data/input/SDS_PV2_combined/keywords/keywords.txt'
standard_out_file = './data/output/SDS_PV2_keyword_results.txt'
region_keys = ['inner', 'middle', 'outer', 'mastoid']
report_path = './data/input/SDS_PV2_combined/reports_single_find_impr'
#load test set data - same set used for ML tests
seed = 987654321
# set the numpy random seed so results are reproducible
    rs = RandomState(seed)
# set common path variables
label_file = './data/input/SDS_PV2_combined/SDS_PV2_class_labels.txt'
# read data
label_data = pd.read_csv(label_file)
# partition the data
pos_cases, neg_cases = wrangle.partion(label_data['doc_norm']==1, label_data, ratios=[0.8,0.2])
train_mask = np.concatenate((pos_cases[0], neg_cases[0]))
test_mask = np.concatenate((pos_cases[1], neg_cases[1]))
rs.shuffle(train_mask)
rs.shuffle(test_mask)
train_labels = label_data.iloc[train_mask]
test_labels = label_data.iloc[test_mask]
# read in the text reports
train_reports = [load_report('{0}/{1}_fi.txt'.format(report_path, pid)) for pid in train_labels['pid']]
test_reports = [load_report('{0}/{1}_fi.txt'.format(report_path, pid)) for pid in test_labels['pid']]
#import keywords
keywords = {}
with open(keyword_file, 'r') as f:
key = ""
for line in f.readlines():
if line.startswith("#"):
key = line[1:].strip('\n')
else:
l = keywords.get(key,[])
v = line.split(",")[0]
l.append(v)
keywords[key] = l
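    # Illustrative note (not part of the original script): the loop above assumes
    # keywords.txt lists comma-separated keyword lines grouped under "#region"
    # headers, for example (contents made up):
    #
    #     #inner
    #     cochlea,variant spelling
    #     #middle
    #     ossicle,variant spelling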
#create empty patient array to hold predicted values
num_patients = len(test_labels)
patients = np.empty((num_patients,), dtype=[('pid','S7'),('inner','i4'),('middle','i4'),('outer','i4'),('mastoid','i4')])
#initialize patients array
for k in region_keys:
patients[k] = 0
#get patient values based on icd9 codes
cnt = 0
for _ , row in test_labels.iterrows():
pid = row['pid']
patients['pid'][cnt] = pid
report = test_reports[cnt]
for region in region_keys:
for keyword in keywords[region]:
if keyword in report: patients[region][cnt] = 1
cnt += 1
#compare predicted and actual
for k in region_keys:
printers.printsf('{0}Analysis for {1} ear region{0}'.format(40*'-', k), standard_out_file)
y_pred = patients[k]
y_act = test_labels[k]
pm = PerformanceMetrics(y_act, y_pred)
printers.printsfPerformanceMetrics(pm, standard_out_file)
cm = sklearn.metrics.confusion_matrix(y_act, y_pred)
printers.printTwoClassConfusion(cm, standard_out_file)
| {
"repo_name": "chop-dbhi/arrc",
"path": "keyword_analysis.py",
"copies": "1",
"size": "3129",
"license": "mit",
"hash": 2339957175155129000,
"line_mean": 33.0108695652,
"line_max": 125,
"alpha_frac": 0.6155321189,
"autogenerated": false,
"ratio": 3.4047878128400435,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45203199317400433,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aaron J. Masino'
import numpy as np
def extractBy(condition, data, tol = 1e-6):
not_condition = condition[:]==False
return (data[condition], data[not_condition])
def partion(condition, data, ratios=[.6,.2,.2]):
''' returns two lists (l1,l2). l1 is a list of numpy arrays where each array contains indices
into the data where the condition is True and l2 is a list of numpy arrays where each array contains
indicies into the data where the condition is False. The len(l1)=len(l2)=len(ratios) and
the lists in l1 and l2 have lengths determined by the ratio values.'''
pos = np.where(condition)[0]
neg = np.where(condition[:]==False)[0]
#SHOULD ALSO USE np.where(condition) to split data
#NEED TO MODIFY TO RETURN MASKS ONLY
#MASK SHOULD BE AN 1D NUMPY ARRAY
#if not (np.sum(ratios) == 1 or np.sum(ratios) == 1.0): raise Exception('Ratios must sum to 1, got {0}'.format(np.sum(ratios)))
#(pos, neg) = extractBy(condition, data)
pos_row_count = pos.shape[0]
neg_row_count = neg.shape[0]
s1 = 0
s2 = 0
s3 = 0
s4 = 0
pdata = []
ndata = []
for i in range(len(ratios)):
r = ratios[i]
if i==len(ratios)-1:
s2 = pos_row_count
s4 = neg_row_count
else:
s2 = min(s1 + int(round(r*pos_row_count)), pos_row_count)
s4 = min(s3 + int(round(r*neg_row_count)), neg_row_count)
if s2<=s1: raise Exception('Insufficient positive data for partition, s1={0}, s2={1}'.format(s1,s2))
if s4<=s3: raise Exception('Insufficient negative data for partition, s3={0}, s4={1}'.format(s3,s4))
pdata.append(pos[s1:s2])
ndata.append(neg[s3:s4])
s1 = s2
s3 = s4
return(pdata,ndata) | {
"repo_name": "chop-dbhi/arrc",
"path": "learn/wrangle.py",
"copies": "1",
"size": "1775",
"license": "mit",
"hash": -3090557369432531500,
"line_mean": 38.4666666667,
"line_max": 131,
"alpha_frac": 0.6163380282,
"autogenerated": false,
"ratio": 3.0187074829931975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9073101746757913,
"avg_score": 0.012388752887056927,
"num_lines": 45
} |
__author__ = 'Aaron J. Masino'
'''
Created on Feb 3, 2014
@author: masinoa
'''
import nltk.data
from nltk.tokenize import regexp_tokenize
from nltk.corpus import stopwords
import re
from functools import reduce
#load a sentence tokenizer, faster for many tokenizations
sent_tokenizer = nltk.data.load('file:./learn/english.pickle')
def sentences(text):
'''Uses /tokenizers/punkt/english tokenizer to create list of sentences from text'''
return sent_tokenizer.tokenize(text)
def words(text, splitContractions=False, contractChars = ["'"]):
'''uses a regexpTokenizer to tokenize text to words. If splitContractions is true,
the regex pattern is [\w]+ so that contractions are split, e.g. "I can't" -> ['I','can','t'],
otherwise the regex pattern is [\w']+ so that contractions are not split, i.e. "I can't" -> ['I', "can't"]
    Additional contract characters, e.g. a hyphen, can be added by overriding the contractChars arg'''
if splitContractions:
pat = "[\w]+"
else:
pat = "[\w{0}]+".format(reduce(lambda x,y: x+y, contractChars, ""))
return regexp_tokenize(text, pat, discard_empty=True)
def replace_patterns(text, patterns, replaceWith="", flags = None):
'''replaces each occurrence of each pattern with the replaceWith value'''
rt = text
for pat in patterns:
if not flags:
rt = re.sub(pat, replaceWith, rt)
else:
rt = re.sub(pat, replaceWith, rt, flags = flags)
return rt
def replace_whole_words(text, words=[], replaceWith="", flags = re.IGNORECASE):
rt = text
for w in words:
rt = replace_patterns(rt, [r'(?<=\s){0}(?=[\s,!\.;])'.format(w), r'^{0}(?=[\s,!\.;])'.format(w)], replaceWith, flags=flags)
return rt
def replace_digits(text, replaceWith='number'):
'''attempts to remove numbers from text using given regex patterns and replace them with the replacementWord'''
return replace_patterns(text, [r'[-+]?\d+\.\d*',r'[-+]?\d*\.\d+',r'[-+]?\d+'], replaceWith)
def standard_numerals():
return ["one","two","three","four", "five","six","seven","eight","nine", "eleven","twelve"]
def replace_numerals(text, replaceWith='number', numerals=None, flags = re.IGNORECASE):
if not numerals:
return replace_whole_words(text, standard_numerals(), replaceWith, flags)
else:
return replace_whole_words(text, numerals, replaceWith, flags)
def standard_units():
return [r"year", r"years",
r"month", r"months",
r"day", r"days",
r"hour", r"hours",
r"minute", r"minutes",
r"second", r"seconds",
r"cm", r"mm"]
def replace_units(text, replaceWith='unit', units = None, flags = re.IGNORECASE):
if not units:
return replace_whole_words(text, standard_units(), replaceWith, flags)
else:
return replace_whole_words(text, units, replaceWith, flags)
def porter_stem(wordList):
porter = nltk.PorterStemmer()
return [porter.stem(w) for w in wordList]
def filter_stop_words(wordlist):
sw = stopwords.words('english')
return filter(lambda w: w not in sw, wordlist)
def text_preprocessor(text):
ct = replace_digits(text)
ct = replace_numerals(ct)
ct = replace_units(ct)
_words = [word.lower() for word in words(ct)]
_words = filter(lambda x: x not in stopwords.words('english') and len(x)>2, _words)
_words = porter_stem(_words)
return reduce(lambda x,y: '{0} {1}'.format(x,y), _words, "")
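# Illustrative sketch (not part of the original module): the full pipeline
# lower-cases, swaps digits/numerals/units for placeholder tokens, drops stop
# words and short tokens, then Porter-stems the rest. The sentence is made up
# and the output shown is approximate.
#
#     text_preprocessor("The lesion measured 3 mm over two days.")
#     # -> " lesion measur number unit number unit"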
| {
"repo_name": "chop-dbhi/arrc",
"path": "nlp/util.py",
"copies": "1",
"size": "3491",
"license": "mit",
"hash": 5654559967217085000,
"line_mean": 36.5376344086,
"line_max": 131,
"alpha_frac": 0.6422228588,
"autogenerated": false,
"ratio": 3.382751937984496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4524974796784496,
"avg_score": null,
"num_lines": null
} |
import copy
def fieldOfView(startX, startY, mapWidth, mapHeight, radius,
funcVisitTile, funcTileBlocked):
"""
Determines which coordinates on a 2D grid are visible from a
particular coordinate.
startX, startY: The (x, y) coordinate on the grid that
is the centre of view.
mapWidth, mapHeight: The maximum extents of the grid. The
minimum extents are assumed to be both
zero.
radius: How far the field of view may extend
in either direction along the x and y
axis.
funcVisitTile: User function that takes two integers
representing an (x, y) coordinate. Is
used to "visit" visible coordinates.
funcTileBlocked: User function that takes two integers
representing an (x, y) coordinate.
Returns True if the coordinate blocks
sight to coordinates "behind" it.
"""
visited = set() # Keep track of what tiles have been visited so
# that no tile will be visited twice.
# Will always see the centre.
funcVisitTile(startX, startY)
visited.add((startX, startY))
    # Get the dimensions of the actual field of view, making
# sure not to go off the map or beyond the radius.
if startX < radius:
minExtentX = startX
else:
minExtentX = radius
if mapWidth - startX - 1 < radius:
maxExtentX = mapWidth - startX - 1
else:
maxExtentX = radius
if startY < radius:
minExtentY = startY
else:
minExtentY = radius
if mapHeight - startY - 1 < radius:
maxExtentY = mapHeight - startY - 1
else:
maxExtentY = radius
# Northeast quadrant
__checkQuadrant(visited, startX, startY, 1, 1,
maxExtentX, maxExtentY,
funcVisitTile, funcTileBlocked)
# Southeast quadrant
__checkQuadrant(visited, startX, startY, 1, -1,
maxExtentX, minExtentY,
funcVisitTile, funcTileBlocked)
# Southwest quadrant
__checkQuadrant(visited, startX, startY, -1, -1,
minExtentX, minExtentY,
funcVisitTile, funcTileBlocked)
# Northwest quadrant
__checkQuadrant(visited, startX, startY, -1, 1,
minExtentX, maxExtentY,
funcVisitTile, funcTileBlocked)
#-------------------------------------------------------------
class __Line(object):
def __init__(self, xi, yi, xf, yf):
self.xi = xi
self.yi = yi
self.xf = xf
self.yf = yf
dx = property(fget = lambda self: self.xf - self.xi)
dy = property(fget = lambda self: self.yf - self.yi)
def pBelow(self, x, y):
return self.relativeSlope(x, y) > 0
def pBelowOrCollinear(self, x, y):
return self.relativeSlope(x, y) >= 0
def pAbove(self, x, y):
return self.relativeSlope(x, y) < 0
def pAboveOrCollinear(self, x, y):
return self.relativeSlope(x, y) <= 0
def pCollinear(self, x, y):
return self.relativeSlope(x, y) == 0
def lineCollinear(self, line):
return self.pCollinear(line.xi, line.yi)\
and self.pCollinear(line.xf, line.yf)
def relativeSlope(self, x, y):
return (self.dy * (self.xf - x))\
- (self.dx * (self.yf - y))
class __ViewBump:
def __init__(self, x, y, parent):
self.x = x
self.y = y
self.parent = parent
class __View:
def __init__(self, shallowLine, steepLine):
self.shallowLine = shallowLine
self.steepLine = steepLine
self.shallowBump = None
self.steepBump = None
def __checkQuadrant(visited, startX, startY, dx, dy,
extentX, extentY, funcVisitTile, funcTileBlocked):
activeViews = []
shallowLine = __Line(0, 1, extentX, 0)
steepLine = __Line(1, 0, 0, extentY)
activeViews.append( __View(shallowLine, steepLine) )
viewIndex = 0
# Visit the tiles diagonally and going outwards
#
# .
# .
# . .
# 9 .
# 5 8 .
# 2 4 7
# @ 1 3 6 . . .
maxI = extentX + extentY
i = 1
while i != maxI + 1 and len(activeViews) > 0:
if 0 > i - extentX:
startJ = 0
else:
startJ = i - extentX
if i < extentY:
maxJ = i
else:
maxJ = extentY
j = startJ
while j != maxJ + 1 and viewIndex < len(activeViews):
x = i - j
y = j
__visitCoord(visited, startX, startY, x, y, dx, dy,
viewIndex, activeViews,
funcVisitTile, funcTileBlocked)
j += 1
i += 1
def __visitCoord(visited, startX, startY, x, y, dx, dy, viewIndex,
activeViews, funcVisitTile, funcTileBlocked):
# The top left and bottom right corners of the current coordinate.
topLeft = (x, y + 1)
bottomRight = (x + 1, y)
while viewIndex < len(activeViews)\
and activeViews[viewIndex].steepLine.pBelowOrCollinear(
bottomRight[0], bottomRight[1]):
# The current coordinate is above the current view and is
# ignored. The steeper fields may need it though.
viewIndex += 1
if viewIndex == len(activeViews)\
or activeViews[viewIndex].shallowLine.pAboveOrCollinear(
topLeft[0], topLeft[1]):
# Either the current coordinate is above all of the fields
# or it is below all of the fields.
return
# It is now known that the current coordinate is between the steep
# and shallow lines of the current view.
isBlocked = False
# The real quadrant coordinates
realX = x * dx
realY = y * dy
if (startX + realX, startY + realY) not in visited:
visited.add((startX + realX, startY + realY))
funcVisitTile(startX + realX, startY + realY)
# else:
# # Debugging
# print (startX + realX, startY + realY)
isBlocked = funcTileBlocked(startX + realX, startY + realY)
if not isBlocked:
# The current coordinate does not block sight and therefore
# has no effect on the view.
return
if activeViews[viewIndex].shallowLine.pAbove(
bottomRight[0], bottomRight[1])\
and activeViews[viewIndex].steepLine.pBelow(
topLeft[0], topLeft[1]):
# The current coordinate is intersected by both lines in the
# current view. The view is completely blocked.
del activeViews[viewIndex]
elif activeViews[viewIndex].shallowLine.pAbove(
bottomRight[0], bottomRight[1]):
# The current coordinate is intersected by the shallow line of
# the current view. The shallow line needs to be raised.
__addShallowBump(topLeft[0], topLeft[1],
activeViews, viewIndex)
__checkView(activeViews, viewIndex)
elif activeViews[viewIndex].steepLine.pBelow(
topLeft[0], topLeft[1]):
# The current coordinate is intersected by the steep line of
# the current view. The steep line needs to be lowered.
__addSteepBump(bottomRight[0], bottomRight[1], activeViews,
viewIndex)
__checkView(activeViews, viewIndex)
else:
# The current coordinate is completely between the two lines
# of the current view. Split the current view into two views
# above and below the current coordinate.
shallowViewIndex = viewIndex
viewIndex += 1
steepViewIndex = viewIndex
activeViews.insert(shallowViewIndex,
copy.deepcopy(activeViews[shallowViewIndex]))
__addSteepBump(bottomRight[0], bottomRight[1],
activeViews, shallowViewIndex)
if not __checkView(activeViews, shallowViewIndex):
viewIndex -= 1
steepViewIndex -= 1
__addShallowBump(topLeft[0], topLeft[1], activeViews,
steepViewIndex)
__checkView(activeViews, steepViewIndex)
def __addShallowBump(x, y, activeViews, viewIndex):
activeViews[viewIndex].shallowLine.xf = x
activeViews[viewIndex].shallowLine.yf = y
activeViews[viewIndex].shallowBump = __ViewBump(x, y,
activeViews[viewIndex].shallowBump)
curBump = activeViews[viewIndex].steepBump
while curBump is not None:
if activeViews[viewIndex].shallowLine.pAbove(
curBump.x, curBump.y):
activeViews[viewIndex].shallowLine.xi = curBump.x
activeViews[viewIndex].shallowLine.yi = curBump.y
curBump = curBump.parent
def __addSteepBump(x, y, activeViews, viewIndex):
activeViews[viewIndex].steepLine.xf = x
activeViews[viewIndex].steepLine.yf = y
activeViews[viewIndex].steepBump = __ViewBump(x, y,
activeViews[viewIndex].steepBump)
curBump = activeViews[viewIndex].shallowBump
while curBump is not None:
if activeViews[viewIndex].steepLine.pBelow(
curBump.x, curBump.y):
activeViews[viewIndex].steepLine.xi = curBump.x
activeViews[viewIndex].steepLine.yi = curBump.y
curBump = curBump.parent
def __checkView(activeViews, viewIndex):
"""
Removes the view in activeViews at index viewIndex if
    - The two lines are collinear
- The lines pass through either extremity
"""
shallowLine = activeViews[viewIndex].shallowLine
steepLine = activeViews[viewIndex].steepLine
if shallowLine.lineCollinear(steepLine)\
and ( shallowLine.pCollinear(0, 1)
or shallowLine.pCollinear(1, 0) ):
del activeViews[viewIndex]
return False
else:
return True
class FovMap(object):
def __init__(self, w, h):
self.w = w
self.h = h
self.map = [
[0 for i in range(self.h)] for j in range(self.w)
]
self.clear_light()
def clear_light(self):
self.light_map = [
[0 for i in range(self.h)] for j in range(self.w)
]
def get_explored(self, x, y):
if 0 < x < self.w and 0 < y < self.h:
return self.map[x][y]
return 0
def set_explored(self, x, y):
if 0 < x < self.w and 0 < y < self.h:
self.map[x][y] = 1
def set_lit(self, x, y):
if 0 < x < self.w and 0 < y < self.h:
self.light_map[x][y] = 1
self.set_explored(x, y)
def get_lit(self, x, y):
if 0 < x < self.w and 0 < y < self.h:
return self.light_map[x][y]
return 0 | {
"repo_name": "RedMike/pYendor",
"path": "lib/fov.py",
"copies": "1",
"size": "11735",
"license": "bsd-2-clause",
"hash": -6715600887052716000,
"line_mean": 30.512465374,
"line_max": 73,
"alpha_frac": 0.567106945,
"autogenerated": false,
"ratio": 3.758808456117873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4825915401117873,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aaron'
englishLetterFreq = {'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I':
6.97, 'N': 6.75, 'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C':
2.78, 'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97, 'P':
1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15, 'Q': 0.10, 'Z':
0.07}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def getLetterCount(message):
letterCount = {'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0, 'G': 0,
'H': 0, 'I': 0, 'J': 0, 'K': 0, 'L': 0, 'M': 0, 'N': 0, 'O': 0, 'P': 0, 'Q': 0,
'R': 0, 'S': 0, 'T': 0, 'U': 0, 'V': 0, 'W': 0, 'X': 0, 'Y': 0, 'Z': 0}
for letter in message.upper():
if letter in LETTERS:
letterCount[letter] += 1
return letterCount
def getItemAtIndexZero(x):
return x[0]
def getFrequencyOrder(message):
letterToFreq = getLetterCount(message)
freqToLetter = {}
for letter in LETTERS:
if letterToFreq[letter] not in freqToLetter:
freqToLetter[letterToFreq[letter]] = [letter]
else:
freqToLetter[letterToFreq[letter]].append(letter)
for freq in freqToLetter:
freqToLetter[freq].sort(key=ETAOIN.find,reverse=True)
freqToLetter[freq] = ''.join(freqToLetter[freq])
freqPairs = list(freqToLetter.items())
freqPairs.sort(key=getItemAtIndexZero,reverse=True)
freqOrder = []
for freqPair in freqPairs:
freqOrder.append(freqPair[1])
return ''.join(freqOrder)
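# Illustrative sketch (not part of the original module): getFrequencyOrder returns
# the 26 letters ordered from most to least frequent in the message, and
# englishFreqMatchScore (below) counts how many of the six most and six least
# frequent letters agree with ETAOIN, giving a score from 0 to 12. The message
# is made up.
#
#     order = getFrequencyOrder("Attack the east wall at dawn")
#     score = englishFreqMatchScore("Attack the east wall at dawn")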
def englishFreqMatchScore(message):
freqOrder = getFrequencyOrder(message)
matchScore = 0
    for letter in ETAOIN[:6]:
        if letter in freqOrder[:6]:
            matchScore += 1
for letter in ETAOIN[-6:]:
if letter in freqOrder[-6:]:
matchScore += 1
return matchScore | {
"repo_name": "Aaron-Cai/PythonCipher",
"path": "freqAnalysis.py",
"copies": "1",
"size": "1684",
"license": "bsd-2-clause",
"hash": 7612826142082984000,
"line_mean": 29.6363636364,
"line_max": 81,
"alpha_frac": 0.614608076,
"autogenerated": false,
"ratio": 2.322758620689655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8266584132389143,
"avg_score": 0.03415651286010247,
"num_lines": 55
} |
__author__ = 'Aaron'
import pyperclip,sys,os
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def translateMessage(key,message,mode):
translated = []
keyIndex = 0;
key = key.upper()
for symbol in message:
num = LETTERS.find(symbol.upper())
if num!= -1:
if mode == 'encrypt':
num += LETTERS.find(key[keyIndex])
elif mode == 'decrypt':
num -= LETTERS.find(key[keyIndex])
num %= len(LETTERS)
if symbol.isupper():
translated.append(LETTERS[num])
elif symbol.islower():
translated.append(LETTERS[num].lower())
keyIndex += 1
keyIndex %= len(key)
else:
translated.append(symbol)
return ''.join(translated)
def encryptMessage(key,message):
return translateMessage(key,message,'encrypt')
def decryptMessage(key,message):
return translateMessage(key,message,'decrypt')
def main():
if(len(sys.argv) != 4):
print 'invalid input'
else:
inputFileName = sys.argv[-3]
outputFileName = sys.argv[-2]
mode = sys.argv[-1]
if not os.path.exists(inputFileName):
            print 'file ' + inputFileName + ' does not exist'
            sys.exit(1)
key = 'ASIMOV'
message = open(inputFileName).read()
if mode == 'encrypt':
translated = encryptMessage(key,message)
elif mode == 'decrypt':
translated = decryptMessage(key,message)
print ''
print ''
print ''
print 'Key: ' + key
print mode + ' result'
print translated[:200]
print 'saving as ' + outputFileName + '...'
open(outputFileName,'w').write(translated)
print 'done'
if __name__ == '__main__':
main()
| {
"repo_name": "Aaron-Cai/PythonCipher",
"path": "vigenereCipher.py",
"copies": "1",
"size": "1499",
"license": "bsd-2-clause",
"hash": -125788802840440340,
"line_mean": 17.2804878049,
"line_max": 50,
"alpha_frac": 0.6617745163,
"autogenerated": false,
"ratio": 3.046747967479675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4208522483779675,
"avg_score": null,
"num_lines": null
} |
import numpy as np
import numpy.lib.index_tricks as itricks
from WavefrontPSF.psf_evaluator import Moment_Evaluator
#from scipy.signal import convolve2d as convolve
def convolve(A, B):
""" Performs a convolution of two 2D arrays """
C = np.fft.ifft2(np.fft.fft2(A) * np.fft.fft2(B))
C = np.fft.fftshift(C)
C = C / np.sum(C)
return np.real(C)
def convolveStar(A, B):
""" Performs a convolution of two 2D arrays, but take the complex conjugate of B """
C = np.fft.ifft2(np.fft.fft2(A) * np.conjugate(np.fft.fft2(B)))
C = np.fft.fftshift(C)
C = C / np.sum(C)
return np.real(C)
def calcChi2(PSF,psi_r,phi_tilde,beta,mu0):
""" Calculate chi2 between PSF convolved with restored image and measured image
"""
imageC = beta * convolve(PSF,psi_r)
diffImage = phi_tilde-imageC
varianceImage = phi_tilde + mu0
chi2 = np.sum(diffImage*diffImage/varianceImage)
return chi2
def makeGaussian(shape,Mxx,Myy,Mxy):
""" Return a 2-d Gaussian function, centered at 0, with desired 2-nd order moments
"""
ny = shape[0]
nx = shape[1]
ylo = -ny/2. + 0.5
yhi = ny/2. - 0.5
xlo = -nx/2. + 0.5
xhi = nx/2. - 0.5
yArr,xArr = itricks.mgrid[ylo:yhi:1j*ny,xlo:xhi:1j*nx]
rho = Mxy/np.sqrt(Mxx*Myy)
gaussian = np.exp( -((yArr*yArr)/Myy + (xArr*xArr)/Mxx - 2.*rho*xArr*yArr/np.sqrt(Mxx*Myy))/(2.*(1-rho*rho)) )
return gaussian
def makeMask(image,sigma,nsigma=3.):
""" build a mask from the noisy Image
"""
mask = np.where(image>nsigma*sigma,1.,0.)
# use working copy
maskcopy = mask.copy()
# mask edge
maskcopy[0,:] = 0.
maskcopy[-1,:] = 0.
maskcopy[:,0] = 0.
maskcopy[:,-1] = 0.
# demand that pixels have 3 neighbors also above 3sigma
shape = mask.shape
for j in range(1,shape[0]-1):
for i in range(1,shape[1]-1):
if mask[j,i]==1:
# check 8 neighbors
nNeigh = mask[j+1,i-1] + mask[j+1,i] + mask[j+1,i+1] + mask[j,i-1] + mask[j,i+1] + mask[j-1,i-1] + mask[j-1,i] + mask[j-1,i+1]
if nNeigh<3:
maskcopy[j,i] = 0.
# fill return array
mask = maskcopy.copy()
return mask
def deconvolve(PSF,phi_tilde,psi_0=None,mask=None,mu0=0.0,niterations=10,convergence=-1,chi2Level=0.0,extra=False):
""" Implementation of the Richardson-Lucy deconvolution algorithm.
Notation follows Lucy 1974, Eqn 15 and 14. Add noise term following
Snyder et al 1993.
Arguments
---------
PSF known Point Spread Function
phi_tilde measured object
psi_0 starting guess for deconvolution
mask =0 for bins where we know that recovered image has no flux
mu0 background noise estimate
"""
# normalize PSF
PSF = PSF / np.sum(PSF)
# if no initial guess, make one from 2nd moments of input image - PSF
if psi_0 is None:
#Turns out Gaussians are a bad initial guess, still unclear as to why
#Can use the image itself as the initial guess, also works fine.
psi_r = np.ones(PSF.shape)
else:
# initial guess
psi_r = np.abs(psi_0)
# mask starting guess
if mask is not None:
psi_r = psi_r * mask
# normalize starting guess
psi_r = psi_r / np.sum(psi_r)
#TODO Maybe this should be an error instead of a warning.
if np.any(np.isnan(psi_r)):
raise RuntimeError("NaN in initial guess, skip this value. ")
# mask image too
if mask is not None:
phi_tilde = phi_tilde * mask
# find normalization for measured image
beta = np.sum(phi_tilde)
# now iterate, either until convergence reached or fixed number of iterations are done
psiByIter = []
diffByIter = []
chi2ByIter = []
iteration = 0
continueTheLoop = True
while continueTheLoop:
# calculate next approximation to psi
phi_r = beta*convolve(psi_r,PSF) + mu0
        #fixing a possible bug in noisy deconv
psi_rplus1 = psi_r * convolveStar(beta*(phi_tilde)/phi_r,PSF)
# mask the next iteration
        if mask is not None:
psi_rplus1 = psi_rplus1 * mask
# normalize it
psi_rplus1 = psi_rplus1 / np.sum(psi_rplus1)
# check for convergence if desired
#Why are the psiByIter appends inside the convergence test?
if convergence>0:
# compare psi_r and psi_rplus1
psiByIter.append(psi_rplus1)
diff = np.sum(np.abs(psi_rplus1 - psi_r))
diffByIter.append(diff)
if diff<convergence:
continueTheLoop = False
elif len(diffByIter) > 2 and diffByIter[-1] > diffByIter[-2] > diffByIter[-3]: #diverging!
#raise RuntimeError("Deconvolution Diverged.")
pass
# also calculate how close to a solution we are
chi2 = calcChi2(PSF,psi_rplus1,phi_tilde,beta,mu0)
chi2ByIter.append(chi2)
if 0<chi2<chi2Level:
continueTheLoop = False
# check for Chi2 level
# always check number of iterations
if iteration==niterations-1:
continueTheLoop = False
# save for next iteration
iteration+=1
psi_r = np.array(psi_rplus1) # does a deepcopy
#TODO rescale deconv by flux
# we are done!
#check to see if the deconv failed
evaluator = Moment_Evaluator()
resid_moments = evaluator(psi_rplus1)
#TODO what to do if makeGaussian throws an error?
# subtract 2nd order moments in quadrature, use an object with the difference
Mxx = resid_moments['Mxx'][0]
Myy = resid_moments['Myy'][0]
Mxy = resid_moments['Mxy'][0]
#print Mxx, Myy, Mxy
if any(np.isnan(x) for x in [Mxx, Myy, Mxy]):
raise RuntimeError("Deconvolution Failed.")
if extra:
return psi_rplus1,diffByIter,psiByIter,chi2ByIter
else:
return psi_rplus1
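# Illustrative sketch (not part of the original module): a Richardson-Lucy run on
# synthetic data, using makeGaussian for both the PSF and the "true" object.
# The shapes, moments and iteration count below are made up.
#
#     psf = makeGaussian((32, 32), Mxx=4.0, Myy=4.0, Mxy=0.0)
#     psf = psf / np.sum(psf)
#     truth = makeGaussian((32, 32), Mxx=2.0, Myy=3.0, Mxy=0.5)
#     blurred = convolve(psf, truth)
#     restored = deconvolve(psf, blurred, niterations=50)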
| {
"repo_name": "aaronroodman/DeconvolvePSF",
"path": "src/lucy.py",
"copies": "1",
"size": "6087",
"license": "mit",
"hash": -1050264533834633900,
"line_mean": 30.3762886598,
"line_max": 142,
"alpha_frac": 0.6004599967,
"autogenerated": false,
"ratio": 3.2481323372465316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43485923339465316,
"avg_score": null,
"num_lines": null
} |
import os
import glob
import string
sourcepath = '\src'
distpath = '\dist'
sourcelist = []
for root, dirs, files in os.walk(sourcepath):
for name in files:
pathfile = os.path.join(root, name)
#ignore python extensions
pathfile = string.replace(pathfile, '.pyo', '')
pathfile = string.replace(pathfile, '.pyc', '')
pathfile = string.replace(pathfile, '.py', '')
#remove root path
pathfile = string.replace(pathfile, sourcepath, '')
sourcelist.append(pathfile)
distlist = []
for root, dirs, files in os.walk(distpath):
for name in files:
pathfile = os.path.join(root, name)
#ignore python extensions
pathfile = string.replace(pathfile, '.pyo', '')
pathfile = string.replace(pathfile, '.pyc', '')
pathfile = string.replace(pathfile, '.py', '')
#remove root path
pathfile = string.replace(pathfile, distpath, '')
distlist.append(pathfile)
sourceset = set(sourcelist)
distset = set(distlist)
missing = sourceset - distset
for (files) in sorted(missing):
print files | {
"repo_name": "lantra/vugamedev",
"path": "comparison.py",
"copies": "1",
"size": "1512",
"license": "mit",
"hash": 3477741370272653300,
"line_mean": 28.6666666667,
"line_max": 77,
"alpha_frac": 0.6342592593,
"autogenerated": false,
"ratio": 3.714987714987715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48492469742877153,
"avg_score": null,
"num_lines": null
} |
__author__ = "Aaron Steele (eightysteele@gmail.com)"
__contributors__ = []
from autocomplete_handler import AutocompleteName
import cache
import collections
import csv
import logging
import json
import urllib
import webapp2
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.ext.ndb import model
from google.appengine.ext.webapp.util import run_wsgi_app
global entities
entities = []
global ac_entities
ac_entities = []
global names_map
def check_entities(flush=False):
"""Writes entities to datastore in batches."""
global entities
global ac_entities
if len(entities) >= 500 or flush:
ndb.put_multi(entities)
entities = []
if len(ac_entities) >= 500 or flush:
ndb.put_multi(ac_entities)
ac_entities = []
def handle_result(rpc, name, url, payload):
"""Builds up a list of CacheEntry entities and batch puts them."""
key = 'name-%s' % name
try:
result = rpc.get_result()
entities.append(cache.create_entry(key, result.content))
entities.extend([cache.create_entry('name-%s' % x, result.content)
for x in names_map[name]])
check_entities()
except urlfetch.DownloadError:
tries = 10
while tries > 0:
try:
result = urlfetch.fetch(url, payload=payload, method='POST', deadline=60)
entities.append(cache.create_entry(key, result.content))
entities.extend([cache.create_entry('name-%s' % x, result.content)
for x in names_map[name]])
check_entities()
return
except urlfetch.DownloadError:
tries = tries - 1
def create_callback(rpc, name, url, payload):
"""Callback for a request."""
return lambda: handle_result(rpc, name, url, payload)
def name_keys(name):
"""Generates name keys that are at least 3 characters long.
Example usage:
> name_keys('concolor')
> ['con', 'conc', 'conco', 'concol', 'concolo', 'concolor']
"""
yield name.strip()
for n in name.split():
name_len = len(n)
yield n
if name_len > 3:
indexes = range(3, name_len)
indexes.reverse()
for i in indexes:
yield n[:i]
def load_names():
"""Loads names.csv into a defaultdict with scientificname keys mapped to
a list of common names."""
global names_map
names_map = collections.defaultdict(list)
for row in csv.DictReader(open('names.csv', 'r')):
names_map[row['scientific'].strip()].extend([x.strip() for x in row['english'].split(',')])
def add_autocomplete_cache(name, kind):
"""Add autocomplete cache entries.
Arguments:
name - The name (Puma concolor)
kind - The name kind (scientific, english, etc)
"""
name = name.strip()
kind = kind.strip()
name_val = '%s:%s' % (name, kind)
for term in name_keys(name):
key = 'ac-%s' % term
names = cache.get(key, loads=True) # names is a list of names
if names:
if name_val not in names:
names.append(name_val)
else:
names = [name_val]
entity = cache.create_entry(key, names, dumps=True)
ac_entities.append(entity)
check_entities(flush=True)
def add_autocomplete_results(name):
# Add name search results.
name = name.strip()
for term in name_keys(name):
key = 'ac-%s' % term
names_list = cache.get(key, loads=True) # names is a list of names
# Note: Each 'x' here is of the form name:kind which is why we split on ':'
rows = [cache.get('name-%s' % x.split(':')[0], loads=True)['rows'] for x in names_list]
result = reduce(lambda x, y: x + y, rows)
entity = cache.get('name-%s' % term, loads=True)
if not entity:
entity = cache.create_entry(
'name-%s' % term, dict(rows=result), dumps=True)
if entity.has_key('rows'):
for r in entity['rows']:
if r not in result:
result.append(r)
entity = cache.create_entry(
'name-%s' % term, dict(rows=result), dumps=True)
else:
logging.warn('No rows for entity %s' % entity)
entities.append(entity)
check_entities(flush=True)
class ClearCache(webapp2.RequestHandler):
def get(self):
self.error(405)
self.response.headers['Allow'] = 'POST'
return
def post(self):
keys = []
key_count = 0
for key in cache.CacheItem.query().iter(keys_only=True):
if key_count > 100:
try:
ndb.delete_multi(keys)
keys = []
key_count = 0
except:
logging.info('delete_multi retry')
tries = 10
while tries > 0:
try:
ndb.delete_multi(keys)
keys = []
key_count = 0
break
except:
logging.info('delete_multi retries left: %s' % tries)
tries = tries - 1
logging.info('Failed to delete_multi on %s' % keys)
keys.append(key)
if len(keys) > 0:
ndb.delete_multi(keys)
class SearchCacheBuilder(webapp2.RequestHandler):
def get(self):
self.error(405)
self.response.headers['Allow'] = 'POST'
return
def post(self):
url = 'https://mol.cartodb.com/api/v2/sql'
sql = "select distinct(scientificname) from scientificnames where type = 'protectedarea'"
rows = []
# Get polygons names:
request = '%s?%s' % (url, urllib.urlencode(dict(q=sql)))
try:
result = urlfetch.fetch(request, deadline=60)
except urlfetch.DownloadError:
tries = 10
while tries > 0:
try:
result = urlfetch.fetch(request, deadline=60)
break
except urlfetch.DownloadError:
tries = tries - 1
content = result.content
rows.extend(json.loads(content)['rows'])
load_names()
# Get unique names from points and polygons:
unique_names = list(set([x['scientificname'] for x in rows]))
#sql = "SELECT p.provider as source, p.scientificname as name, p.type as type FROM polygons as p WHERE p.scientificname = '%s' UNION SELECT t.provider as source, t.scientificname as name, t.type as type FROM points as t WHERE t.scientificname = '%s'"
sql = "SELECT sn.provider AS source, sn.scientificname AS name, sn.type AS type FROM scientificnames AS sn WHERE sn.scientificname = '%s'"
# Cache search results.
rpcs = []
for names in self.names_generator(unique_names):
for name in names:
q = sql % name
payload = urllib.urlencode(dict(q=q))
rpc = urlfetch.create_rpc(deadline=60)
rpc.callback = create_callback(rpc, name, url, payload)
urlfetch.make_fetch_call(rpc, url, payload=payload, method='POST')
rpcs.append(rpc)
for rpc in rpcs:
rpc.wait()
check_entities(flush=True)
# Build autocomplete cache:
for name in unique_names:
add_autocomplete_cache(name, 'scientific')
if names_map.has_key(name):
for common in names_map[name]:
add_autocomplete_cache(common, 'english')
check_entities(flush=True)
# # Build autocomplete search results cache:
for name in unique_names:
add_autocomplete_results(name)
if names_map.has_key(name):
for common in names_map[name]:
add_autocomplete_results(common)
check_entities(flush=True)
def names_generator(self, unique_names):
"""Generates lists of at most 10 names."""
names = []
for x in xrange(len(unique_names)):
names.append(unique_names[x])
if x % 10 == 0:
yield names
names = []
if len(names) > 0:
yield names
class AutoCompleteBuilder(webapp2.RequestHandler):
def get(self):
self.error(405)
self.response.headers['Allow'] = 'POST'
return
def post(self):
url = 'https://mol.cartodb.com/api/v2/sql'
sql_points = "select distinct(scientificname) from points limit 800"
sql_polygons = "select distinct(scientificname) from polygons limit 800"
# Get points names:
request = '%s?%s' % (url, urllib.urlencode(dict(q=sql_points)))
result = urlfetch.fetch(request, deadline=60)
content = result.content
rows = json.loads(content)['rows']
# Get polygons names:
request = '%s?%s' % (url, urllib.urlencode(dict(q=sql_polygons)))
result = urlfetch.fetch(request, deadline=60)
content = result.content
rows.extend(json.loads(content)['rows'])
load_names()
# Get unique names from points and polygons:
unique_names = list(set([x['scientificname'] for x in rows]))
sql = "SELECT p.provider as source, p.scientificname as name, p.type as type FROM polygons as p WHERE p.scientificname = '%s' UNION SELECT t.provider as source, t.scientificname as name, t.type as type FROM points as t WHERE t.scientificname = '%s'"
# Cache search results.
# rpcs = []
# for names in self.names_generator(unique_names):
# for name in names:
# q = sql % (name, name)
# payload = urllib.urlencode(dict(q=q))
# rpc = urlfetch.create_rpc(deadline=60)
# rpc.callback = create_callback(rpc, name, url, payload)
# urlfetch.make_fetch_call(rpc, url, payload=payload, method='POST')
# rpcs.append(rpc)
# for rpc in rpcs:
# rpc.wait()
# check_entities(flush=True)
# Build autocomplete cache:
for name in unique_names:
add_autocomplete_cache(name, 'scientific')
if names_map.has_key(name):
for common in names_map[name]:
add_autocomplete_cache(common, 'english')
check_entities(flush=True)
# Build autocomplete search results cache:
# for name in unique_names:
# add_autocomplete_results(name)
# if names_map.has_key(name):
# for common in names_map[name]:
# add_autocomplete_results(common)
# check_entities(flush=True)
def names_generator(self, unique_names):
"""Generates lists of at most 10 names."""
names = []
for x in xrange(len(unique_names)):
names.append(unique_names[x])
if x % 10 == 0:
yield names
names = []
if len(names) > 0:
yield names
class SearchResponseBuilder(webapp2.RequestHandler):
def get(self):
self.error(405)
self.response.headers['Allow'] = 'POST'
return
def post(self):
url = 'https://mol.cartodb.com/api/v2/sql'
sql_points = "select distinct(scientificname) from points limit 800"
sql_polygons = "select distinct(scientificname) from polygons limit 800"
# Get points names:
request = '%s?%s' % (url, urllib.urlencode(dict(q=sql_points)))
result = urlfetch.fetch(request, deadline=60)
content = result.content
rows = json.loads(content)['rows']
# Get polygons names:
request = '%s?%s' % (url, urllib.urlencode(dict(q=sql_polygons)))
result = urlfetch.fetch(request, deadline=60)
content = result.content
rows.extend(json.loads(content)['rows'])
load_names()
# Get unique names from points and polygons:
unique_names = list(set([x['scientificname'] for x in rows]))
sql = "SELECT p.provider as source, p.scientificname as name, p.type as type FROM polygons as p WHERE p.scientificname = '%s' UNION SELECT t.provider as source, t.scientificname as name, t.type as type FROM points as t WHERE t.scientificname = '%s'"
# Cache search results.
# rpcs = []
# for names in self.names_generator(unique_names):
# for name in names:
# q = sql % (name, name)
# payload = urllib.urlencode(dict(q=q))
# rpc = urlfetch.create_rpc(deadline=60)
# rpc.callback = create_callback(rpc, name, url, payload)
# urlfetch.make_fetch_call(rpc, url, payload=payload, method='POST')
# rpcs.append(rpc)
# for rpc in rpcs:
# rpc.wait()
# check_entities(flush=True)
# Build autocomplete cache:
# for name in unique_names:
# add_autocomplete_cache(name, 'scientific')
# if names_map.has_key(name):
# for common in names_map[name]:
# add_autocomplete_cache(common, 'english')
# check_entities(flush=True)
# Build autocomplete search results cache:
for name in unique_names:
add_autocomplete_results(name)
if names_map.has_key(name):
for common in names_map[name]:
add_autocomplete_results(common)
check_entities(flush=True)
def names_generator(self, unique_names):
"""Generates lists of at most 10 names."""
names = []
for x in xrange(len(unique_names)):
names.append(unique_names[x])
if x % 10 == 0:
yield names
names = []
if len(names) > 0:
yield names
application = webapp2.WSGIApplication(
[('/backend/build_search_cache', SearchCacheBuilder),
('/backend/clear_search_cache', ClearCache),
('/backend/build_autocomplete', AutoCompleteBuilder),
('/backend/build_search_response', SearchResponseBuilder),]
, debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| {
"repo_name": "MapofLife/MOL",
"path": "app/search_cache_backend.py",
"copies": "1",
"size": "14424",
"license": "bsd-3-clause",
"hash": -5081427497706750000,
"line_mean": 34.702970297,
"line_max": 258,
"alpha_frac": 0.5644758735,
"autogenerated": false,
"ratio": 3.9626373626373628,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0022652732568211563,
"num_lines": 404
} |
__author__ = 'aaronweaver'
from datetime import datetime
import json
from dojo.models import Finding
class BanditParser(object):
def __init__(self, filename, test):
tree = filename.read()
try:
data = json.loads(str(tree, 'utf-8'))
except:
data = json.loads(tree)
dupes = dict()
if "generated_at" in data:
find_date = datetime.strptime(data["generated_at"], '%Y-%m-%dT%H:%M:%SZ')
for item in data["results"]:
categories = ''
language = ''
mitigation = ''
impact = ''
references = ''
findingdetail = ''
title = ''
group = ''
status = ''
title = "Test Name: " + item["test_name"] + " Test ID: " + item["test_id"]
# ##### Finding details information ######
findingdetail += "Filename: " + item["filename"] + "\n"
findingdetail += "Line number: " + str(item["line_number"]) + "\n"
findingdetail += "Issue Confidence: " + item["issue_confidence"] + "\n\n"
findingdetail += "Code:\n"
findingdetail += item["code"] + "\n"
sev = item["issue_severity"]
mitigation = item["issue_text"]
references = item["test_id"]
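# De-duplicate on test title + file name + line number so repeated results map to a single Finding.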
dupe_key = title + item["filename"] + str(item["line_number"])
if dupe_key in dupes:
find = dupes[dupe_key]
else:
dupes[dupe_key] = True
find = Finding(title=title,
test=test,
active=False,
verified=False,
description=findingdetail,
severity=sev.title(),
numerical_severity=Finding.get_numerical_severity(sev),
mitigation=mitigation,
impact=impact,
references=references,
file_path=item["filename"],
line=item["line_number"],
url='N/A',
date=find_date,
static_finding=True)
dupes[dupe_key] = find
findingdetail = ''
self.items = list(dupes.values())
| {
"repo_name": "rackerlabs/django-DefectDojo",
"path": "dojo/tools/bandit/parser.py",
"copies": "2",
"size": "2441",
"license": "bsd-3-clause",
"hash": 8624187820962124000,
"line_mean": 34.3768115942,
"line_max": 86,
"alpha_frac": 0.4354772634,
"autogenerated": false,
"ratio": 4.614366729678639,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.604984399307864,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aaronweaver'
from defusedxml import ElementTree
from datetime import datetime
from dojo.models import Finding
class CheckmarxXMLParser(object):
def __init__(self, filename, test):
cxscan = ElementTree.parse(filename)
root = cxscan.getroot()
dupes = dict()
for query in root.findall('Query'):
categories = ''
language = ''
mitigation = ''
impact = ''
references = ''
findingdetail = ''
title = ''
group = ''
status = ''
find_date = root.get("ScanStart")
name = query.get('name')
cwe = query.get('cweId')
if query.get('categories') is not None:
categories = query.get('categories')
if query.get('Language') is not None:
language = query.get('Language')
if query.get('group') is not None:
group = query.get('group').replace('_', ' ')
for result in query.findall('Result'):
deeplink = result.get('DeepLink')
if categories is not None:
findingdetail = 'Category: ' + categories + '\n'
if language is not None:
findingdetail += 'Language: ' + language + '\n'
if group is not None:
findingdetail += 'Group: ' + group + '\n'
if result.get('Status') is not None:
findingdetail += 'Status: ' + result.get('Status') + '\n'
findingdetail += 'Finding Link: ' + deeplink + '\n\n'
dupe_key = categories + cwe + name + result.get('FileName') + result.get('Line')
if dupe_key in dupes:
find = dupes[dupe_key]
else:
dupes[dupe_key] = True
sev = result.get('Severity')
result.get('FileName')
for path in result.findall('Path'):
title = query.get('name').replace('_', ' ') + ' (' + path.get('PathId') + ')'
for pathnode in path.findall('PathNode'):
findingdetail += 'Source Object: %s\n' % (pathnode.find('Name').text)
findingdetail += 'Filename: %s\n' % (pathnode.find('FileName').text)
findingdetail += 'Line Number: %s\n' % (pathnode.find('Line').text)
for codefragment in pathnode.findall('Snippet/Line'):
findingdetail += 'Code: %s\n' % (codefragment.find('Code').text.strip())
findingdetail += '\n'
find = Finding(title=title,
cwe=int(cwe),
test=test,
active=False,
verified=False,
description=findingdetail,
severity=sev,
numerical_severity=Finding.get_numerical_severity(sev),
mitigation=mitigation,
impact=impact,
references=references,
url='N/A',
date=find_date)
dupes[dupe_key] = find
findingdetail = ''
self.items = dupes.values()
| {
"repo_name": "Prakhash/security-tools",
"path": "external/django-DefectDojo-1.2.1/dojo/tools/checkmarx/parser.py",
"copies": "4",
"size": "3573",
"license": "apache-2.0",
"hash": 3517576160266509000,
"line_mean": 37.0106382979,
"line_max": 104,
"alpha_frac": 0.4354883851,
"autogenerated": false,
"ratio": 4.848032564450475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016504828479286325,
"num_lines": 94
} |
__author__ = 'aaronweaver'
from defusedxml import ElementTree
from dateutil import parser
import ntpath
from dojo.utils import add_language
from dojo.models import Finding
class CheckmarxXMLParser(object):
result_dupes = dict()
language_list = []
def __init__(self, filename, test):
cxscan = ElementTree.parse(filename)
root = cxscan.getroot()
dupes = dict()
for query in root.findall('Query'):
categories = ''
language = ''
mitigation = 'N/A'
impact = 'N/A'
references = ''
findingdetail = ''
title = ''
group = ''
status = ''
self.result_dupes = dict()
find_date = parser.parse(root.get("ScanStart"))
name = query.get('name')
cwe = query.get('cweId')
if query.get('categories') is not None:
categories = query.get('categories')
if query.get('Language') is not None:
language = query.get('Language')
if query.get('group') is not None:
group = query.get('group').replace('_', ' ')
for result in query.findall('Result'):
if categories is not None:
findingdetail = "{}**Category:** {}\n".format(findingdetail, categories)
if language is not None:
findingdetail = "{}**Language:** {}\n".format(findingdetail, language)
if language not in self.language_list:
self.language_list.append(language)
if group is not None:
findingdetail = "{}**Group:** {}\n".format(findingdetail, group)
if result.get('Status') is not None:
findingdetail = "{}**Status:** {}\n".format(findingdetail, result.get('Status'))
deeplink = "[{}]({})".format(result.get('DeepLink'), result.get('DeepLink'))
findingdetail = "{}**Finding Link:** {}\n\n".format(findingdetail, deeplink)
dupe_key = "{}{}{}{}".format(categories, cwe, name, result.get('FileName').encode('utf-8'))
if dupe_key in dupes:
find = dupes[dupe_key]
title, description, pathnode = self.get_finding_detail(query, result)
find.description = "{}\n{}".format(find.description, description)
dupes[dupe_key] = find
else:
dupes[dupe_key] = True
sev = result.get('Severity')
result.get('FileName')
title, description, pathnode = self.get_finding_detail(query, result)
find = Finding(title=title,
cwe=int(cwe),
test=test,
active=False,
verified=False,
description=findingdetail + description,
severity=sev,
numerical_severity=Finding.get_numerical_severity(sev),
mitigation=mitigation,
impact=impact,
references=references,
file_path=pathnode.find('FileName').text,
line=pathnode.find('Line').text,
url='N/A',
date=find_date,
static_finding=True)
dupes[dupe_key] = find
findingdetail = ''
for lang in self.language_list:
add_language(test.engagement.product, lang)
self.items = list(dupes.values())
def get_finding_detail(self, query, result):
findingdetail = ""
title = ""
for path in result.findall('Path'):
title = query.get('name').replace('_', ' ')
for pathnode in path.findall('PathNode'):
result_dupes_key = pathnode.find('Line').text + "|" + pathnode.find('Column').text
if result_dupes_key not in self.result_dupes:
if pathnode.find('Line').text is not None:
findingdetail = "{}**Line Number:** {}\n".format(findingdetail, pathnode.find('Line').text)
if pathnode.find('Column').text is not None:
findingdetail = "{}**Column:** {}\n".format(findingdetail, pathnode.find('Column').text)
if pathnode.find('Name').text is not None:
findingdetail = "{}**Source Object:** {}\n".format(findingdetail, pathnode.find('Name').text)
for codefragment in pathnode.findall('Snippet/Line'):
findingdetail = "{}**Number:** {}\n**Code:** {}\n".format(findingdetail, codefragment.find('Number').text, codefragment.find('Code').text.strip())
findingdetail = '{}-----\n'.format(findingdetail)
self.result_dupes[result_dupes_key] = True
if title and pathnode.find('FileName').text:
title = "{} ({})".format(title, ntpath.basename(pathnode.find('FileName').text))
return title, findingdetail, pathnode
| {
"repo_name": "OWASP/django-DefectDojo",
"path": "dojo/tools/checkmarx/parser.py",
"copies": "1",
"size": "5389",
"license": "bsd-3-clause",
"hash": 8047831438161155000,
"line_mean": 40.7751937984,
"line_max": 170,
"alpha_frac": 0.4910001856,
"autogenerated": false,
"ratio": 4.609923011120616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5600923196720616,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Aaron Weaver'
from dojo.models import Endpoint, Finding
from datetime import datetime
import json
class SSLlabsParser(object):
def __init__(self, filename, test):
tree = filename.read()
try:
data = json.loads(str(tree, 'utf-8'))
except:
data = json.loads(tree)
find_date = datetime.now()
dupes = {}
for host in data:
ssl_endpoints = []
hostName = ""
if "host" in host:
hostName = host["host"]
if "endpoints" in host:
ssl_endpoints = host["endpoints"]
for endpoints in ssl_endpoints:
categories = ''
language = ''
mitigation = 'N/A'
impact = 'N/A'
references = ''
findingdetail = ''
title = ''
group = ''
status = ''
port = ''
ipAddress = ''
protocol = ''
grade = ""
if "grade" in endpoints:
grade = endpoints["grade"]
port = ""
if "port" in host:
port = host["port"]
ipAddress = ""
if "ipAddress" in endpoints:
ipAddress = endpoints["ipAddress"]
protocol = ""
if "protocol" in host:
protocol = host["protocol"]
title = "TLS Grade '%s' for %s" % (grade, hostName)
sev = self.getCriticalityRating(grade)
description = "%s \n\n" % title
cert = ""
if "cert" in endpoints["details"]:
cert = endpoints["details"]["cert"]
description = "%sCertificate Subject: %s\n" % (description, cert["subject"])
description = "%sIssuer Subject: %s\n" % (description, cert["issuerSubject"])
description = "%sSignature Algorithm: %s\n" % (description, cert["sigAlg"])
else:
for cert in host["certs"]:
description = "%sCertificate Subject: %s\n" % (description, cert["subject"])
description = "%sIssuer Subject: %s\n" % (description, cert["issuerSubject"])
description = "%sSignature Algorithm: %s\n" % (description, cert["sigAlg"])
protocol_str = ""
for protocol_data in endpoints["details"]["protocols"]:
protocol_str += protocol_data["name"] + " " + protocol_data["version"] + "\n"
if protocol_str:
description += "\nProtocols:\n" + protocol_str
description += "\nSuites List: "
suite_info = ""
try:
if "list" in endpoints["details"]["suites"]:
for suites in endpoints["details"]["suites"]["list"]:
suite_info = suite_info + self.suite_data(suites)
elif "suites" in endpoints["details"]:
for item in endpoints["details"]["suites"]:
for suites in item["list"]:
suite_info = suite_info + self.suite_data(suites)
except:
suite_info = "Not provided." + "\n\n"
description += suite_info
description += "Additional Information:\n\n"
if "serverSignature" in endpoints["details"]:
description += "serverSignature: " + endpoints["details"]["serverSignature"] + "\n"
if "prefixDelegation" in endpoints["details"]:
description += "prefixDelegation: " + str(endpoints["details"]["prefixDelegation"]) + "\n"
if "nonPrefixDelegation" in endpoints["details"]:
description += "nonPrefixDelegation: " + str(endpoints["details"]["nonPrefixDelegation"]) + "\n"
if "vulnBeast" in endpoints["details"]:
description += "vulnBeast: " + str(endpoints["details"]["vulnBeast"]) + "\n"
if "renegSupport" in endpoints["details"]:
description += "renegSupport: " + str(endpoints["details"]["renegSupport"]) + "\n"
if "stsStatus" in endpoints["details"]:
description += "stsStatus: " + endpoints["details"]["stsStatus"] + "\n"
if "stsResponseHeader" in endpoints["details"]:
description += "stsResponseHeader: " + endpoints["details"]["stsResponseHeader"] + "\n"
if "stsPreload" in endpoints["details"]:
description += "stsPreload: " + str(endpoints["details"]["stsPreload"]) + "\n"
if "sessionResumption" in endpoints["details"]:
description += "sessionResumption: " + str(endpoints["details"]["sessionResumption"]) + "\n"
if "compressionMethods" in endpoints["details"]:
description += "compressionMethods: " + str(endpoints["details"]["compressionMethods"]) + "\n"
if "supportsNpn" in endpoints["details"]:
description += "supportsNpn: " + str(endpoints["details"]["supportsNpn"]) + "\n"
if "supportsAlpn" in endpoints["details"]:
description += "supportsAlpn: " + str(endpoints["details"]["supportsAlpn"]) + "\n"
if "sessionTickets" in endpoints["details"]:
description += "sessionTickets: " + str(endpoints["details"]["sessionTickets"]) + "\n"
if "ocspStapling" in endpoints["details"]:
description += "ocspStapling: " + str(endpoints["details"]["ocspStapling"]) + "\n"
if "sniRequired" in endpoints["details"]:
description += "sniRequired: " + str(endpoints["details"]["sniRequired"]) + "\n"
if "httpStatusCode" in endpoints["details"]:
description += "httpStatusCode: " + str(endpoints["details"]["httpStatusCode"]) + "\n"
if "supportsRc4" in endpoints["details"]:
description += "supportsRc4: " + str(endpoints["details"]["supportsRc4"]) + "\n"
if "rc4WithModern" in endpoints["details"]:
description += "rc4WithModern: " + str(endpoints["details"]["rc4WithModern"]) + "\n"
if "forwardSecrecy" in endpoints["details"]:
description += "forwardSecrecy: " + str(endpoints["details"]["forwardSecrecy"]) + "\n"
if "protocolIntolerance" in endpoints["details"]:
description += "protocolIntolerance: " + str(endpoints["details"]["protocolIntolerance"]) + "\n"
if "miscIntolerance" in endpoints["details"]:
description += "miscIntolerance: " + str(endpoints["details"]["miscIntolerance"]) + "\n"
if "heartbleed" in endpoints["details"]:
description += "heartbleed: " + str(endpoints["details"]["heartbleed"]) + "\n"
if "heartbeat" in endpoints["details"]:
description += "heartbeat: " + str(endpoints["details"]["heartbeat"]) + "\n"
if "openSslCcs" in endpoints["details"]:
description += "openSslCcs: " + str(endpoints["details"]["openSslCcs"]) + "\n"
if "openSSLLuckyMinus20" in endpoints["details"]:
description += "openSSLLuckyMinus20: " + str(endpoints["details"]["openSSLLuckyMinus20"]) + "\n"
if "poodle" in endpoints["details"]:
description += "poodle: " + str(endpoints["details"]["poodle"]) + "\n"
if "poodleTls" in endpoints["details"]:
description += "poodleTls: " + str(endpoints["details"]["poodleTls"]) + "\n"
if "fallbackScsv" in endpoints["details"]:
description += "fallbackScsv: " + str(endpoints["details"]["fallbackScsv"]) + "\n"
if "freak" in endpoints["details"]:
description += "freak: " + str(endpoints["details"]["freak"]) + "\n"
if "hasSct" in endpoints["details"]:
description += "hasSct: " + str(endpoints["details"]["hasSct"]) + "\n"
"""
cName = ""
for commonNames in cert["commonNames"]:
cName = "%s %s \n" % (cName, commonNames)
aName = ""
for altNames in cert["altNames"]:
aName = "%s %s \n" % (aName, altNames)
"""
protoName = ""
for protocols in endpoints["details"]["protocols"]:
protoName = "%s %s %s\n" % (protoName, protocols["name"], protocols["version"])
dupe_key = hostName + grade
if dupe_key in dupes:
find = dupes[dupe_key]
if description is not None:
find.description += description
else:
find = Finding(title=title,
cwe=310, # Cryptographic Issues
test=test,
active=False,
verified=False,
description=description,
severity=sev,
numerical_severity=Finding.get_numerical_severity(sev),
mitigation=mitigation,
impact=impact,
references=references,
url=hostName,
date=find_date,
dynamic_finding=True)
dupes[dupe_key] = find
find.unsaved_endpoints = list()
find.unsaved_endpoints.append(Endpoint(host=ipAddress, fqdn=hostName, port=port, protocol=protocol))
self.items = list(dupes.values())
# Criticality rating
# Grades: https://github.com/ssllabs/research/wiki/SSL-Server-Rating-Guide
# A - Info, B - Medium, C - High, D/F/M/T - Critical
def getCriticalityRating(self, rating):
criticality = "Info"
if "A" in rating:
criticality = "Info"
elif "B" in rating:
criticality = "Medium"
elif "C" in rating:
criticality = "High"
elif "D" in rating or "F" in rating or "M" in rating or "T" in rating:
criticality = "Critical"
return criticality
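# Example: getCriticalityRating('A+') returns 'Info'; getCriticalityRating('F') returns 'Critical'.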
def suite_data(self, suites):
suite_info = ""
suite_info += suites["name"] + "\n"
suite_info += "Cipher Strength: " + str(suites["cipherStrength"]) + "\n"
if "ecdhBits" in suites:
suite_info += "ecdhBits: " + str(suites["ecdhBits"]) + "\n"
if "ecdhStrength" in suites:
suite_info += "ecdhStrength: " + str(suites["ecdhStrength"])
suite_info += "\n\n"
return suite_info
| {
"repo_name": "rackerlabs/django-DefectDojo",
"path": "dojo/tools/ssl_labs/parser.py",
"copies": "1",
"size": "11268",
"license": "bsd-3-clause",
"hash": -1315198056133747700,
"line_mean": 50.2181818182,
"line_max": 116,
"alpha_frac": 0.4875754349,
"autogenerated": false,
"ratio": 4.604822231303637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023679392160987685,
"num_lines": 220
} |
__author__ = 'Aaron Weaver'
from dojo.models import Endpoint, Finding
from datetime import datetime
import json
class SSLlabsParser(object):
def __init__(self, filename, test):
tree = filename.read()
try:
data = json.loads(str(tree, 'utf-8'))
except:
data = json.loads(tree)
find_date = datetime.now()
dupes = {}
for host in data:
ssl_endpoints = []
hostName = ""
if "host" in host:
hostName = host["host"]
if "endpoints" in host:
ssl_endpoints = host["endpoints"]
for endpoints in ssl_endpoints:
categories = ''
language = ''
mitigation = 'N/A'
impact = 'N/A'
references = ''
findingdetail = ''
title = ''
group = ''
status = ''
port = ''
ipAddress = ''
protocol = ''
grade = ""
if "grade" in endpoints:
grade = endpoints["grade"]
port = ""
if "port" in host:
port = host["port"]
ipAddress = ""
if "ipAddress" in endpoints:
ipAddress = endpoints["ipAddress"]
protocol = ""
if "protocol" in host:
protocol = host["protocol"]
title = "TLS Grade '%s' for %s" % (grade, hostName)
sev = self.getCriticalityRating(grade)
description = "%s \n\n" % title
cert = ""
if "cert" in endpoints["details"]:
cert = endpoints["details"]["cert"]
description = "%sCertificate Subject: %s\n" % (description, cert["subject"])
description = "%sIssuer Subject: %s\n" % (description, cert["issuerSubject"])
description = "%sSignature Algorithm: %s\n" % (description, cert["sigAlg"])
else:
for cert in host["certs"]:
description = "%sCertificate Subject: %s\n" % (description, cert["subject"])
description = "%sIssuer Subject: %s\n" % (description, cert["issuerSubject"])
description = "%sSignature Algorithm: %s\n" % (description, cert["sigAlg"])
protocol_str = ""
for protocol_data in endpoints["details"]["protocols"]:
protocol_str += protocol_data["name"] + " " + protocol_data["version"] + "\n"
if protocol_str:
description += "\nProtocols:\n" + protocol_str
description += "\nSuites List: "
suite_info = ""
try:
if "list" in endpoints["details"]["suites"]:
for suites in endpoints["details"]["suites"]["list"]:
suite_info = suite_info + self.suite_data(suites)
elif "suites" in endpoints["details"]:
for item in endpoints["details"]["suites"]:
for suites in item["list"]:
suite_info = suite_info + self.suite_data(suites)
except:
suite_info = "Not provided." + "\n\n"
description += suite_info
description += "Additional Information:\n\n"
if "serverSignature" in endpoints["details"]:
description += "serverSignature: " + endpoints["details"]["serverSignature"] + "\n"
if "prefixDelegation" in endpoints["details"]:
description += "prefixDelegation: " + str(endpoints["details"]["prefixDelegation"]) + "\n"
if "nonPrefixDelegation" in endpoints["details"]:
description += "nonPrefixDelegation: " + str(endpoints["details"]["nonPrefixDelegation"]) + "\n"
if "vulnBeast" in endpoints["details"]:
description += "vulnBeast: " + str(endpoints["details"]["vulnBeast"]) + "\n"
if "renegSupport" in endpoints["details"]:
description += "renegSupport: " + str(endpoints["details"]["renegSupport"]) + "\n"
if "stsStatus" in endpoints["details"]:
description += "stsStatus: " + endpoints["details"]["stsStatus"] + "\n"
if "stsResponseHeader" in endpoints["details"]:
description += "stsResponseHeader: " + endpoints["details"]["stsResponseHeader"] + "\n"
if "stsPreload" in endpoints["details"]:
description += "stsPreload: " + str(endpoints["details"]["stsPreload"]) + "\n"
if "sessionResumption" in endpoints["details"]:
description += "sessionResumption: " + str(endpoints["details"]["sessionResumption"]) + "\n"
if "compressionMethods" in endpoints["details"]:
description += "compressionMethods: " + str(endpoints["details"]["compressionMethods"]) + "\n"
if "supportsNpn" in endpoints["details"]:
description += "supportsNpn: " + str(endpoints["details"]["supportsNpn"]) + "\n"
if "supportsAlpn" in endpoints["details"]:
description += "supportsAlpn: " + str(endpoints["details"]["supportsAlpn"]) + "\n"
if "sessionTickets" in endpoints["details"]:
description += "sessionTickets: " + str(endpoints["details"]["sessionTickets"]) + "\n"
if "ocspStapling" in endpoints["details"]:
description += "ocspStapling: " + str(endpoints["details"]["ocspStapling"]) + "\n"
if "sniRequired" in endpoints["details"]:
description += "sniRequired: " + str(endpoints["details"]["sniRequired"]) + "\n"
if "httpStatusCode" in endpoints["details"]:
description += "httpStatusCode: " + str(endpoints["details"]["httpStatusCode"]) + "\n"
if "supportsRc4" in endpoints["details"]:
description += "supportsRc4: " + str(endpoints["details"]["supportsRc4"]) + "\n"
if "rc4WithModern" in endpoints["details"]:
description += "rc4WithModern: " + str(endpoints["details"]["rc4WithModern"]) + "\n"
if "forwardSecrecy" in endpoints["details"]:
description += "forwardSecrecy: " + str(endpoints["details"]["forwardSecrecy"]) + "\n"
if "protocolIntolerance" in endpoints["details"]:
description += "protocolIntolerance: " + str(endpoints["details"]["protocolIntolerance"]) + "\n"
if "miscIntolerance" in endpoints["details"]:
description += "miscIntolerance: " + str(endpoints["details"]["miscIntolerance"]) + "\n"
if "heartbleed" in endpoints["details"]:
description += "heartbleed: " + str(endpoints["details"]["heartbleed"]) + "\n"
if "heartbeat" in endpoints["details"]:
description += "heartbeat: " + str(endpoints["details"]["heartbeat"]) + "\n"
if "openSslCcs" in endpoints["details"]:
description += "openSslCcs: " + str(endpoints["details"]["openSslCcs"]) + "\n"
if "openSSLLuckyMinus20" in endpoints["details"]:
description += "openSSLLuckyMinus20: " + str(endpoints["details"]["openSSLLuckyMinus20"]) + "\n"
if "poodle" in endpoints["details"]:
description += "poodle: " + str(endpoints["details"]["poodle"]) + "\n"
if "poodleTls" in endpoints["details"]:
description += "poodleTls: " + str(endpoints["details"]["poodleTls"]) + "\n"
if "fallbackScsv" in endpoints["details"]:
description += "fallbackScsv: " + str(endpoints["details"]["fallbackScsv"]) + "\n"
if "freak" in endpoints["details"]:
description += "freak: " + str(endpoints["details"]["freak"]) + "\n"
if "hasSct" in endpoints["details"]:
description += "hasSct: " + str(endpoints["details"]["hasSct"]) + "\n"
"""
cName = ""
for commonNames in cert["commonNames"]:
cName = "%s %s \n" % (cName, commonNames)
aName = ""
for altNames in cert["altNames"]:
aName = "%s %s \n" % (aName, altNames)
"""
protoName = ""
for protocols in endpoints["details"]["protocols"]:
protoName = "%s %s %s\n" % (protoName, protocols["name"], protocols["version"])
dupe_key = hostName + grade
if dupe_key in dupes:
find = dupes[dupe_key]
if description is not None:
find.description += description
else:
find = Finding(title=title,
cwe=310, # Cryptographic Issues
test=test,
active=False,
verified=False,
description=description,
severity=sev,
numerical_severity=Finding.get_numerical_severity(sev),
mitigation=mitigation,
impact=impact,
references=references,
url=hostName,
date=find_date,
dynamic_finding=True)
dupes[dupe_key] = find
find.unsaved_endpoints = list()
find.unsaved_endpoints.append(Endpoint(host=ipAddress, fqdn=hostName, port=port, protocol=protocol))
self.items = list(dupes.values())
# Criticality rating
# Grades: https://github.com/ssllabs/research/wiki/SSL-Server-Rating-Guide
# A - Info, B - Medium, C - High, D/F/M/T - Critical
def getCriticalityRating(self, rating):
criticality = "Info"
if "A" in rating:
criticality = "Info"
elif "B" in rating:
criticality = "Medium"
elif "C" in rating:
criticality = "High"
elif "D" in rating or "F" in rating or "M" in rating or "T" in rating:
criticality = "Critical"
return criticality
def suite_data(self, suites):
suite_info = ""
suite_info += suites["name"] + "\n"
suite_info += "Cipher Strength: " + str(suites["cipherStrength"]) + "\n"
if "ecdhBits" in suites:
suite_info += "ecdhBits: " + str(suites["ecdhBits"]) + "\n"
if "ecdhStrength" in suites:
suite_info += "ecdhStrength: " + str(suites["ecdhStrength"])
suite_info += "\n\n"
return suite_info
| {
"repo_name": "OWASP/django-DefectDojo",
"path": "dojo/tools/ssl_labs/parser.py",
"copies": "1",
"size": "11271",
"license": "bsd-3-clause",
"hash": 13686470994533472,
"line_mean": 50.2318181818,
"line_max": 116,
"alpha_frac": 0.4876231035,
"autogenerated": false,
"ratio": 4.60228664761127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023679392160987685,
"num_lines": 220
} |
__author__ = 'aaronweaver'
import pandas as pd
import hashlib
from dojo.models import Finding, Endpoint
class ContrastCSVParser(object):
def __init__(self, filename, test):
dupes = dict()
self.items = ()
if filename is None:
self.items = ()
return
df = pd.read_csv(filename, header=0)
for i, row in df.iterrows():
# Vulnerability Name,Vulnerability ID,Category,Rule Name,Severity,Status,Number of Events,First Seen,Last Seen,Application Name,Application ID,Application Code,CWE ID,Request Method,Request Port,Request Protocol,Request Version,Request URI,Request Qs,Request Body
cwe = self.format_cwe(df.ix[i, 'CWE ID'])
title = df.ix[i, 'Rule Name']
category = df.ix[i, 'Category']
description = self.format_description(df, i)
severity = df.ix[i, 'Severity']
if severity == "Note":
severity = "Info"
mitigation = "N/A"
impact = "N/A"
references = "N/A"
dupe_key = hashlib.md5(category.encode('utf-8') + str(cwe).encode('utf-8') + title.encode('utf-8')).hexdigest()
if dupe_key in dupes:
finding = dupes[dupe_key]
if finding.description:
finding.description = finding.description + "\nVulnerability ID: " + \
df.ix[i, 'Vulnerability ID'] + "\n" + \
df.ix[i, 'Vulnerability Name'] + "\n"
self.process_endpoints(finding, df, i)
dupes[dupe_key] = finding
else:
dupes[dupe_key] = True
finding = Finding(title=title,
cwe=int(cwe),
test=test,
active=False,
verified=False,
description=description,
severity=severity,
numerical_severity=Finding.get_numerical_severity(
severity),
mitigation=mitigation,
impact=impact,
references=references,
url='N/A',
dynamic_finding=True)
dupes[dupe_key] = finding
self.process_endpoints(finding, df, i)
self.items = list(dupes.values())
def format_description(self, df, i):
description = "Request URI: " + str(df.ix[i, 'Request URI']) + "\n"
description = description + "Rule Name: " + df.ix[i, 'Rule Name'] + "\n"
description = description + "Vulnerability ID: " + \
df.ix[i, 'Vulnerability ID'] + "\n"
description = description + df.ix[i, 'Vulnerability Name'] + "\n\n"
if pd.isnull(df.ix[i, 'Request Qs']) is False:
description = description + "Request QueryString: " + \
str(df.ix[i, 'Request Qs']) + "\n"
if pd.isnull(df.ix[i, 'Request Body']) is False:
description = description + "Request Body: " + \
str(df.ix[i, 'Request Body']) + "\n"
return description
def format_cwe(self, url):
# Get the last path
filename = url.rsplit('/', 1)[1]
# Split out the . to get the CWE id
filename = filename.split('.')[0]
return filename
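# Illustrative (hypothetical URL): 'https://cwe.mitre.org/data/definitions/89.html' -> '89'.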
def process_endpoints(self, finding, df, i):
protocol = "http"
host = "0.0.0.0"
query = ""
fragment = ""
path = df.ix[i, 'Request URI']
if pd.isnull(path) is False:
try:
dupe_endpoint = Endpoint.objects.get(protocol=protocol,
host=host,
query=query,
fragment=fragment,
path=path,
product=finding.test.engagement.product)
except Endpoint.DoesNotExist:
dupe_endpoint = None
if not dupe_endpoint:
endpoint = Endpoint(protocol=protocol,
host=host,
query=query,
fragment=fragment,
path=path,
product=finding.test.engagement.product)
else:
endpoint = dupe_endpoint
if not dupe_endpoint:
endpoints = [endpoint]
else:
endpoints = [endpoint, dupe_endpoint]
finding.unsaved_endpoints = finding.unsaved_endpoints + endpoints
| {
"repo_name": "OWASP/django-DefectDojo",
"path": "dojo/tools/contrast/parser.py",
"copies": "2",
"size": "4897",
"license": "bsd-3-clause",
"hash": -6651149804168665000,
"line_mean": 39.1393442623,
"line_max": 275,
"alpha_frac": 0.4621196651,
"autogenerated": false,
"ratio": 4.576635514018691,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6038755179118691,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aaronweaver'
import re
from defusedxml import ElementTree as ET
import hashlib
from urllib.parse import urlparse
from dojo.models import Finding, Endpoint
class NiktoXMLParser(object):
def __init__(self, filename, test):
dupes = dict()
self.items = ()
if filename is None:
self.items = ()
return
tree = ET.parse(filename)
root = tree.getroot()
scan = root.find('scandetails')
# New versions of Nikto have a new file type (nxvmlversion="1.2") which adds an additional niktoscan tag
# This find statement below is to support new file format while not breaking older Nikto scan files versions.
if scan is None:
scan = root.find('./niktoscan/scandetails')
for item in scan.findall('item'):
# Title
titleText = None
description = item.find("description").text
# Cut the title down to the first sentence
sentences = re.split(
r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', description)
if len(sentences) > 0:
titleText = sentences[0][:900]
else:
titleText = description[:900]
# Url
ip = item.find("iplink").text
# Remove the port numbers for 80/443
ip = ip.replace(":80", "")
ip = ip.replace(":443", "")
# Severity
severity = "Info" # Nikto doesn't assign severity, default to Info
# Description
description = "\n \n".join((("Host: " + ip),
("Description: " + item.find("description").text),
("HTTP Method: " + item.attrib["method"]),
))
mitigation = "N/A"
impact = "N/A"
references = "N/A"
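# De-duplicate on an MD5 of the description; repeated items only extend the existing finding's description.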
dupe_key = hashlib.md5(description.encode("utf-8")).hexdigest()
if dupe_key in dupes:
finding = dupes[dupe_key]
if finding.description:
finding.description = finding.description + "\nHost:" + ip + "\n" + description
self.process_endpoints(finding, ip)
dupes[dupe_key] = finding
else:
dupes[dupe_key] = True
finding = Finding(title=titleText,
test=test,
active=False,
verified=False,
description=description,
severity=severity,
numerical_severity=Finding.get_numerical_severity(
severity),
mitigation=mitigation,
impact=impact,
references=references,
url='N/A',
dynamic_finding=True)
dupes[dupe_key] = finding
self.process_endpoints(finding, ip)
self.items = list(dupes.values())
def process_endpoints(self, finding, host):
protocol = "http"
query = ""
fragment = ""
path = ""
url = urlparse(host)
if url:
path = url.path
rhost = re.search(
"(http|https|ftp)\://([a-zA-Z0-9\.\-]+(\:[a-zA-Z0-9\.&%\$\-]+)*@)*((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|localhost|([a-zA-Z0-9\-]+\.)*[a-zA-Z0-9\-]+\.(com|edu|gov|int|mil|net|org|biz|arpa|info|name|pro|aero|coop|museum|[a-zA-Z]{2}))[\:]*([0-9]+)*([/]*($|[a-zA-Z0-9\.\,\?\'\\\+&%\$#\=~_\-]+)).*?$",
host)
protocol = rhost.group(1)
host = rhost.group(4)
try:
dupe_endpoint = Endpoint.objects.get(protocol=protocol,
host=host,
query=query,
fragment=fragment,
path=path,
product=finding.test.engagement.product)
except Endpoint.DoesNotExist:
dupe_endpoint = None
if not dupe_endpoint:
endpoint = Endpoint(protocol=protocol,
host=host,
query=query,
fragment=fragment,
path=path,
product=finding.test.engagement.product)
else:
endpoint = dupe_endpoint
if not dupe_endpoint:
endpoints = [endpoint]
else:
endpoints = [endpoint, dupe_endpoint]
finding.unsaved_endpoints = finding.unsaved_endpoints + endpoints
| {
"repo_name": "rackerlabs/django-DefectDojo",
"path": "dojo/tools/nikto/parser.py",
"copies": "2",
"size": "5143",
"license": "bsd-3-clause",
"hash": -7994457130637868000,
"line_mean": 38.5615384615,
"line_max": 534,
"alpha_frac": 0.4458487264,
"autogenerated": false,
"ratio": 4.036891679748822,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5482740406148823,
"avg_score": null,
"num_lines": null
} |
import argparse
import os
import pickle
import subprocess
import sys
import smtplib
comps = ['neutron', 'python-neutronclient', 'horizon']
rmadison_cmd = 'rmadison %(comp)s | grep "%(ppa)s "'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--ppa', required = True,
help = 'ppa to grep version')
parser.add_argument('-f', '--filepath', required = True,
help = 'the file to store versions')
args = parser.parse_args()
changes = {}
try:
versions = pickle.load(open(args.filepath))
except Exception:
versions = {}
for comp in comps:
try:
output = subprocess.check_output(rmadison_cmd % {'comp': comp,
'ppa': args.ppa},
shell=True)
# e.g. "neutron | 1:2014.1.1-0ubuntu2 | trusty-updates | source"
version, release = output.split('|')[1].split(':')[1].split('-')
if versions.get(comp, None) != version:
changes[comp] = {'old': versions.get(comp, None), 'new': version}
versions[comp] = version
except subprocess.CalledProcessError as e:
# info for this comp and ppa doesn't exist
print e
except KeyError:
# previous comp version info doesn't exist
versions[comp] = version
if changes:
# send email
print 'change set: %s' % changes
subprocess.check_output(('ssh cwchang@10.28.29.132 "printf Need-rebase | mail -s \\"trusty rebase warning\\" cwchang@cisco.com"'), shell=True)
with open(args.filepath, 'w') as f:
pickle.dump(versions, f)
| {
"repo_name": "CiscoSystems/n1kv-openstack-tools",
"path": "canonical/rebase_warning.py",
"copies": "1",
"size": "1806",
"license": "apache-2.0",
"hash": -2257821476985970700,
"line_mean": 35.8571428571,
"line_max": 150,
"alpha_frac": 0.5470653378,
"autogenerated": false,
"ratio": 4.004434589800444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5051499927600444,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abc'
from pyquery import PyQuery as pq
import datetime
def removeScript(input, output):
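# Copies input to output while skipping <script>...</script> blocks, tracked line by line
# via the counts of opening and closing script tags.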
with open(input) as f1:
with open(output, 'w') as f2:
scriptFlag = 0
for eachline in f1:
tagStartCnt = eachline.count('<script')
tagStopCnt = eachline.count('/script>')
if tagStartCnt == tagStopCnt+1 :
if scriptFlag == 0:
scriptFlag = 1
# print eachline
else:
print '----------------' + eachline
elif tagStartCnt+1 == tagStopCnt :
if scriptFlag == 1:
scriptFlag = 0
# print eachline
else:
print '----------------' + eachline
elif tagStartCnt == tagStopCnt and tagStartCnt != 0:
# print eachline
pass
else:
if scriptFlag==0:
f2.write(eachline)
exportFile = 'doubanMyMovie/output'
outputFile = 'doubanMyMovie/tmp'
if __name__ == '__main__':
MovieList = []
for i in range(1282/30+1):
filenName = '%s_%02d' % (exportFile, i)
removeScript(filenName, outputFile)
with open(outputFile) as f:
a = f.read()
d = pq(a)
p = d('.item-show')
# print len(p)
for each in p:
items = each.findall('div')
OneMovie = {}
for item in items:
if item.attrib['class'] == 'title':
href = item.find('a').attrib['href']
title = item.find('a').text.strip().replace(',', '_', 100)
OneMovie['title'] = title
OneMovie['href'] = href
pass
if item.attrib['class'] == 'date':
date = item.find('span').tail.strip()
OneMovie['date'] = date
pass
print '%s, %s, %s' % (OneMovie['date'], OneMovie['title'], OneMovie['href'])
MovieList.append(OneMovie)
cnt2010 = {}
cnt2011 = {}
cnt2012 = {}
cnt2013 = {}
for i in range(365+1):
cnt2010[i] = 0
cnt2011[i] = 0
cnt2012[i] = 0
cnt2013[i] = 0
for each in MovieList:
y,m,d = each['date'].split('-')
d1 = datetime.date(int(y),int(m),int(d)).timetuple()
if d1.tm_year == 2010:
cnt2010[d1.tm_yday] += 1
# print '%d, %d, %s, %s, %s' % (d1.tm_yday, cnt2010[d1.tm_yday], y, m, d)
if d1.tm_year == 2011:
cnt2011[d1.tm_yday] += 1
if d1.tm_year == 2012:
cnt2012[d1.tm_yday] += 1
if d1.tm_year == 2013:
cnt2013[d1.tm_yday] += 1
print '-----------------------------------------'
for i in range(365+1):
print cnt2010[i] | {
"repo_name": "SamhooXee/other",
"path": "douban/douban_parse_131106A.py",
"copies": "1",
"size": "3041",
"license": "mit",
"hash": -8750948564373744000,
"line_mean": 34.7882352941,
"line_max": 92,
"alpha_frac": 0.420914173,
"autogenerated": false,
"ratio": 3.964797913950456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9869047678492251,
"avg_score": 0.003332881691641096,
"num_lines": 85
} |
__author__ = 'Abdullah AA'
from html.parser import HTMLParser
class ImageParser(HTMLParser):
image_links = []
image_names = []
def handle_starttag(self, tag, attrs):
target_found = False
if tag == 'a':
for temp_tuple in attrs:
if temp_tuple[0] == 'class' and temp_tuple[1] == 'fileThumb':
target_found = True
if target_found:
for temp_tuple in attrs:
if temp_tuple[0] == 'href':
self.image_links.append(temp_tuple[1])
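# set_image_name derives the plain file name from each collected link by keeping everything
# after the final '/', e.g. (hypothetical) '.../src/1234.jpg' -> '1234.jpg'.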
def set_image_name(self):
for link in self.image_links:
big_image_name = list(link)
counter = len(big_image_name) - 1
regular_image_name = ''
while big_image_name[counter] != '/':
counter -= 1
counter += 1
while counter < len(big_image_name):
regular_image_name += big_image_name[counter]
counter += 1
self.image_names.append(regular_image_name) | {
"repo_name": "Ansari90/grab_n_save",
"path": "ResponseParser.py",
"copies": "1",
"size": "1049",
"license": "mit",
"hash": -428309185360462660,
"line_mean": 27.3783783784,
"line_max": 77,
"alpha_frac": 0.515729266,
"autogenerated": false,
"ratio": 3.988593155893536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5004322421893536,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdullah'
from api import views
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
url(r'^contents$', views.Contents.as_view()),
url(r'^links', views.Links.as_view()),
url(r'^contents/(?P<content_link>\w+)$', views.Contents.as_view()),
# url(r'^users$', views.Users.as_view()),
url(r'^user/', include('membership.urls')),
url(r'^tracks/requirements$', views.TrackRequirements.as_view()),
url(r'^tracks$', views.CompositionView.as_view()),
url(r'^tracks/(?P<user_id>\d+)$', views.CompositionView.as_view()),
url(r'^tracks/(?P<list_type>\w+)$', views.CompositionView.as_view()),
url(r'^vote', views.VoteView.as_view()),
url(r'^contests$', views.ContestView.as_view()),
url(r'^contests/(?P<contest_year>\d+)$', views.ContestView.as_view()),
# url(r'^user($', views.SignUp.as_view()),
# url(r'^manufacturers$', views.Manufacturers.as_view()),
# url(r'^extras/$', views.Extras.as_view()),
# url(r'^auth/', views.AuthView.as_view(), name='auth-view'),
# url(r'^deneme/', views.Deneme.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns, allowed=['json', 'api'])
| {
"repo_name": "haliciyazilim/beste-yarismasi",
"path": "server/api/urls.py",
"copies": "1",
"size": "1222",
"license": "mit",
"hash": 1936785145095909400,
"line_mean": 42.6428571429,
"line_max": 74,
"alpha_frac": 0.6415711948,
"autogenerated": false,
"ratio": 3.0936708860759494,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.42352420808759494,
"avg_score": null,
"num_lines": null
} |
__author__ = 'AbdullahS'
from pprint import pprint, pformat # NOQA
from hydra.lib import util
import time
import sys
import logging
import os
import random
from hydra.lib.childmgr import ChildManager
l = util.createlogger('MOCKAPI', logging.INFO)
# l.setLevel(logging.DEBUG)
class TaskInfo(object):
"""
TaskInfo class that mimics marathon
TaskInfo object.
Note: This should not be considered a
copy of marathon TaskInfo. Any
required attribtue will need to be added
on need basis.
"""
def __init__(self):
self.id = None
self.ports = []
class AppInfo(object):
"""
AppInfo class that holds information
about launched processes.
Instantiates a ChildManager() that
is unique to each AppInfo instance.
"""
def __init__(self):
# Initiate ChildManager with sighandler=False,
# since we do not want the parent (in this case us) to exit
# if a child dies
self.cmgr = ChildManager(sighandler=False)
self.id = None
self.host = ""
self.tasks = []
self.deployments = []
self.tasks_running = 0
class MockMarathonIF(object):
"""
MockMarathonIF object. Exposes
the APIs used by hydra infra.
Governs the bookkeeping for all launched processes.
@args:
marathon_addr : dummy value provided by hydra infra
my_addr : dummy value provided by hydra infra
mesos : MockMesosIF instance
"""
def __init__(self, marathon_addr, my_addr, mesos):
l.info("MockMarathonIF init")
self.myAddr = my_addr
self.mesos = mesos
self.total_ports = 100
self.port_index = 0
self.generate_env_ports()
self.list_apps = {}
self.app_attr = {}
def generate_env_ports(self):
"""
Generates a list of random numbers
to be passed on to child processes as ports
"""
self.env_ports = []
for x in range(self.total_ports):
self.env_ports.append(random.randrange(10000, 20000))
def get_apps(self):
"""
Return list of all launched apps
"""
return self.list_apps
def get_app(self, app_id):
"""
Return AppInfo object for respective app
@args:
app_id : unique app id
"""
if app_id in self.list_apps:
return self.list_apps[app_id]
l.info("No app named [%s] exists", app_id)
return None
def delete_app(self, app_id, force=False):
"""
Delete app, terminates all child processes
@args:
app_id : unique app id
force : unused, hydra infra compatibility
"""
l.info("Deleting [%s]", app_id)
a = self.get_app(app_id)
l.info(a)
a.cmgr.terminate_process_and_children(app_id)
if self.app_attr[app_id][1] > 1:
scale_app_name = app_id + "-scale"
a.cmgr.terminate_process_and_children(scale_app_name)
del self.list_apps[app_id]
del self.app_attr[app_id]
def delete_deployment(self, dep_id):
return
def get_deployments(self):
return
def delete_app_ifexisting(self, app_id, trys=4):
"""
Delete app if existing terminates all child processes
@args:
app_id : unique app id
trys : retry count
"""
for idx in range(0, trys):
try:
a = self.get_app(app_id)
if a:
return self.delete_app(app_id)
return None
except:
e = sys.exc_info()[0]
pprint("<p>Error: %s</p>" % e)
time.sleep(10)
raise
def create_app(self, app_id, attr, app_local_launch_name=""):
"""
Launch the requested app by hydra infra
Uses ChildManager inside AppInfo to launch
child processes as tasks.
@args:
app_id : unique app id
attr : hydra MarathonApp instance, creates all app attributes
app_local_launch_name : Launch name for the app, needs to be different for subsequent
launches, e.g. when scaling the app
"""
# Prepare process data
if not app_local_launch_name:
app_local_launch_name = app_id
cmd = attr.cmd
requested_ports = len(attr.ports)
cmd = cmd[cmd.rfind("./hydra"):len(cmd)]
cmd = "hydra " + cmd[cmd.find(' '): len(cmd)].strip()
cmd = cmd.split(' ')
pwd = os.getcwd()
cwd = None
l.info("CWD = " + pformat(pwd))
l.info("CMD = " + pformat(cmd))
# Prepare ports data, mimic marathon.. sort of
myenv = os.environ.copy()
requested_ports = len(attr.ports)
curr_ports = []
for x in range(requested_ports):
myenv["PORT%d" % x] = str(self.env_ports[self.port_index])
curr_ports.append(str(self.env_ports[self.port_index]))
self.port_index += 1
# Init app info
app_info = AppInfo()
if app_id not in self.list_apps:
self.list_apps[app_id] = app_info
if app_id not in self.app_attr:
self.app_attr[app_id] = [attr, 0] # task index
myenv["mock"] = "true"
# launch children
self.list_apps[app_id].cmgr.add_child(app_local_launch_name, cmd, cwd, myenv)
self.list_apps[app_id].cmgr.launch_children(ports=curr_ports)
# Init task info, sort of mimics marathon
self.list_apps[app_id].tasks.append(TaskInfo())
task_count = self.app_attr[app_id][1]
self.list_apps[app_id].tasks[task_count].id = \
str(self.list_apps[app_id].cmgr.jobs[app_local_launch_name]["pid"])
self.list_apps[app_id].tasks[task_count].ports = \
self.list_apps[app_id].cmgr.jobs[app_local_launch_name]["ports"]
self.list_apps[app_id].tasks[task_count].host = "localhost"
self.list_apps[app_id].tasks_running = len(self.list_apps[app_id].tasks)
self.app_attr[app_id][1] += 1
def wait_app_removal(self, app):
"""
Wait for app to be removed
@args:
app : unique app name
"""
cnt = 0
while True:
if not self.get_app(app):
break
time.sleep(0.2)
cnt += 1
if cnt > 0:
l.info("Stuck waiting for %s to be deleted CNT=%d" % (app, cnt))
return True
def wait_app_ready(self, app, running_count, sleep_before_next_try=1):
"""
Wait for app to be ready
@args:
app : Unique app name
running_count : Expected app running count
"""
cnt = 0
while True:
a1 = self.get_app(app)
if a1.tasks_running == running_count:
return a1
cnt += 1
time.sleep(sleep_before_next_try)
if (cnt % 30) == 29:
l.info("Waiting for app [%s] to launch", app)
def scale_app(self, app, scale):
l.info("Mock scale app")
scale_app_name = app + "-scale"
attr = self.app_attr[app][0]
self.create_app(app, attr, app_local_launch_name=scale_app_name)
return True
class MockMesosIF(object):
"""
MockMesosIF class.
Creates a mapping of only one slave i-e localhost
"""
def __init__(self, addr):
self.slaves_ids = {}
self.slaves_hostname_info = {}
self.slave_count = 1
self.myaddr = addr
self.myip = "127.0.0.1"
self.update_slaves()
l.info("MockMesosIF init")
def update_slaves(self):
"""
Populate localhost data
"""
self.total_slaves = self.slave_count
self.slaves_hostname_info["localhost"] = self.myip
l.info(self.slaves_hostname_info)
def get_slave_ip_from_hn(self, slave_hn):
"""
Return ip for hostname (localhost)
@args:
slave_hn : slave hostname
"""
return self.slaves_hostname_info[slave_hn]
| {
"repo_name": "sushilks/hydra",
"path": "src/main/python/hydra/lib/mock_backend.py",
"copies": "4",
"size": "8204",
"license": "apache-2.0",
"hash": 8287475296248777000,
"line_mean": 29.1617647059,
"line_max": 95,
"alpha_frac": 0.551316431,
"autogenerated": false,
"ratio": 3.7021660649819497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.625348249598195,
"avg_score": null,
"num_lines": null
} |
__author__ = 'AbdullahS'
from pprint import pprint, pformat # NOQA
import logging
from hydra.lib import util
from hydra.lib.utility.py_sys_cmd import PySysCommand
l = util.createlogger('h_common', logging.INFO)
# l.setLevel(logging.DEBUG)
def execute_remote_cmd(ip, user, cmd, timeout=10, suppress_output=False):
"""
Execute a remote command via ssh
@args:
ip: Remote IP to execute command on
user: Remote user
cmd: Command to execute
timeout: Command timeout
suppress_output: If True, do not log the command output
"""
cmd = "ssh -o StrictHostKeyChecking=no %s@%s \"%s\"" % (user, ip, cmd)
l.info("Executing remote command [%s] on ip[%s], user[%s]", cmd, ip, user)
pg_cmd = PySysCommand(cmd)
pg_cmd.run(timeout=timeout)
output = pg_cmd.stdout + pg_cmd.stderr
if not suppress_output:
l.info("Result: %s", output)
return output
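# Example usage (hypothetical host/user): execute_remote_cmd('192.0.2.10', 'ubuntu', 'uptime', timeout=5)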
def execute_local_cmd(cmd, timeout=10):
"""
Execute a local command
@args:
cmd: Command to execute
timeout: Command timeout
"""
l.info("Executing local command [%s]", cmd)
pg_cmd = PySysCommand(cmd)
pg_cmd.run(timeout=timeout)
output = pg_cmd.stdout + pg_cmd.stderr
l.info("Result: %s", output)
| {
"repo_name": "kratos7/hydra",
"path": "src/main/python/hydra/lib/common.py",
"copies": "4",
"size": "1296",
"license": "apache-2.0",
"hash": 5420315013317296000,
"line_mean": 27.8,
"line_max": 78,
"alpha_frac": 0.6211419753,
"autogenerated": false,
"ratio": 3.456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6077141975300001,
"avg_score": null,
"num_lines": null
} |
__author__ = 'AbdullahS'
import logging
import os
import time
import psutil
import pika
from hydra.lib import util
from hydra.lib.hdaemon import HDaemonRepSrv
from hydra.lib.childmgr import ChildManager
from pprint import pformat
l = util.createlogger('HSub', logging.INFO)
class HDRmqsRepSrv(HDaemonRepSrv):
def __init__(self, port):
self.msg_cnt = 0 # message count, kept on the instance rather than in a global
HDaemonRepSrv.__init__(self, port)
self.register_fn('getstats', self.get_stats)
self.register_fn('resetstats', self.reset_stats)
def get_stats(self):
process = psutil.Process()
self.run_data['stats']['net']['end'] = psutil.net_io_counters()
self.run_data['stats']['cpu']['end'] = process.cpu_times()
self.run_data['stats']['mem']['end'] = process.memory_info()
duration = self.run_data['last_msg_time'] - self.run_data['first_msg_time']
if duration == 0:
self.run_data['rate'] = 0
else:
self.run_data['rate'] = self.run_data['msg_cnt'] / duration
return ('ok', self.run_data)
def reset_stats(self):
l.info("RESETTING SUB STATS")
process = psutil.Process()
self.run_data = {'msg_cnt': 0, 'first_msg_time': 0, 'last_msg_time': 0, 'stats': {}}
self.run_data['stats']['net'] = {'start': psutil.net_io_counters()}
self.run_data['stats']['cpu'] = {'start': process.cpu_times()}
self.run_data['stats']['mem'] = {'start': process.memory_info()}
self.msg_cnt = 0
return ('ok', 'stats reset')
def callback(self, ch, method, properties, body):
self.msg_cnt = self.msg_cnt + 1
if self.run_data['first_msg_time'] == 0:
self.run_data['first_msg_time'] = time.time()
index, messagedata = body.split()
# l.info("%s, %s", index, messagedata)
# Update data for THIS client, later to be queried
self.run_data['msg_cnt'] = self.msg_cnt
self.run_data['last_msg_time'] = time.time()
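# Note (illustrative): the publisher formats every body as "<index> <payload>",
# e.g. "42 msg42", so the body.split() above yields index="42" and
# messagedata="msg42".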
def run10(argv):
pwd = os.getcwd()
l.info("CWD = " + pformat(pwd))
cmgr = ChildManager()
myenv = os.environ.copy()
cmd = './hydra hydra.rmqtest.rmq_sub.run'.split(' ') + argv[1:]
# TODO: (AbdullahS) Find a better way to do this
if "mock" in myenv:
cmd = 'hydra hydra.rmqtest.rmq_sub.run'.split(' ') + argv[1:]
cwd = None
for idx in range(0, 10):
myenv = os.environ.copy()
myenv["PORT0"] = myenv["PORT" + str(idx)]
l.info("Launch%d:" % idx + " cwd=" + " CMD=" + pformat(cmd) + " PORT0=" + str(myenv["PORT0"]))
cmgr.add_child('p' + str(idx), cmd, cwd, myenv)
cmgr.launch_children()
cmgr.wait()
def run(argv):
pub_ip = ""
l.info("JOB RUN : " + pformat(argv))
if len(argv) > 1:
pub_ip = argv[1]
if not pub_ip:
raise Exception("Rmq-sub needs a pub server to subscribe to, pub_ip"
" can not be empty pub_ip[%s]" % (pub_ip))
# Initialize HDaemonRepSrv
sub_rep_port = os.environ.get('PORT0')
hd = HDRmqsRepSrv(sub_rep_port)
hd.reset_stats()
hd.run()
l.info("RabbitMQ SUB client connecting to RabbitMQ PUB server at [%s]" % (pub_ip))
credentials = pika.PlainCredentials('hydra', 'hydra')
connection = pika.BlockingConnection(pika.ConnectionParameters(host=pub_ip, credentials=credentials))
channel = connection.channel()
channel.exchange_declare(exchange='pub', type='fanout')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='pub', queue=queue_name)
l.info("RabbitMQ SUB client succesfully connected to RabbitMQ PUB server at [%s]" % (pub_ip))
hd.msg_cnt = 0
channel.basic_consume(hd.callback, queue=queue_name, no_ack=True)
channel.start_consuming()
| {
"repo_name": "sushilks/hydra",
"path": "src/main/python/hydra/rmqtest/rmq_sub.py",
"copies": "4",
"size": "3863",
"license": "apache-2.0",
"hash": -4137370242138258000,
"line_mean": 36.5048543689,
"line_max": 105,
"alpha_frac": 0.6078177582,
"autogenerated": false,
"ratio": 3.1978476821192054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5805665440319205,
"avg_score": null,
"num_lines": null
} |
__author__ = 'AbdullahS'
import sys
from pprint import pprint, pformat # NOQA
from optparse import OptionParser
import logging
from hydra.lib import util
from hydra.lib.h_analyser import HAnalyser
from hydra.lib.hydrabase import HydraBase
try:
# Python 2.x
from ConfigParser import ConfigParser
except ImportError:
# Python 3.x
from configparser import ConfigParser
l = util.createlogger('runTest', logging.INFO)
# l.setLevel(logging.DEBUG)
tout_60s = 60000
tout_30s = 30000
tout_10s = 10000
class RMQPubAnalyser(HAnalyser):
def __init__(self, server_ip, server_port, task_id):
HAnalyser.__init__(self, server_ip, server_port, task_id)
class RMQSubAnalyser(HAnalyser):
def __init__(self, server_ip, server_port):
HAnalyser.__init__(self, server_ip, server_port)
class RunTestRMQ(HydraBase):
def __init__(self, options, runtest=True, mock=False):
self.options = options
self.config = ConfigParser()
HydraBase.__init__(self, 'RMQScale', self.options, self.config, startappserver=runtest, mock=mock)
self.rmqpub = '/rmq-pub'
self.rmqsub = '/rmq-sub'
self.add_appid(self.rmqpub)
self.add_appid(self.rmqsub)
self.boundary_setup(self.options, 'msg_rate', self.boundary_resultfn)
if runtest:
self.run_test()
self.stop_appserver()
def rerun_test(self, options):
self.options = options
self.boundary_setup(self.options, 'msg_rate', self.boundary_resultfn)
# self.test_duration = options.test_duration
# self.msg_batch = options.msg_batch
# self.msg_rate = options.msg_rate
l.info("Updating test metrics: test_duration=%s, msg_batch=%s, msg_rate=%s",
self.options.test_duration, self.options.msg_batch, self.options.msg_rate)
# Update the PUB server with new metrics
self.ha_pub.update_config(test_duration=self.options.test_duration,
msg_batch=self.options.msg_batch,
msg_requested_rate=self.options.msg_rate)
l.info("PUB server updated")
# Create test groups
g1 = self.create_app_group(self.rmqsub, "test-group", num_app_instances=10, analyser=HAnalyser)
g2 = self.create_app_group(self.rmqsub, "test-group2", num_app_instances=5, analyser=HAnalyser)
g3 = self.create_app_group(self.rmqsub, "test-group3", num_app_instances=5, analyser=HAnalyser)
l.info("Groups created")
self.remove_unresponsive_tasks(self.rmqsub)
g1._execute("do_ping")
g2._execute("do_ping")
g3._execute("do_ping")
# Pass signals in groups of apps
g1._execute("reset_stats")
g2._execute("reset_stats")
g3._execute("reset_stats")
# Signal message sending
l.info("Sending signal to PUB to start sending all messages..")
self.ha_pub.start_test()
self.ha_pub.wait_for_testend()
self.fetch_app_stats(self.rmqpub)
assert(len(self.apps[self.rmqpub]['stats']) == 1)
pub_data = self.apps[self.rmqpub]['stats'].values()[0]
l.info("Publisher send %d packets at the rate of %d pps" % (pub_data['msg_cnt'],
pub_data['rate']))
# Fetch all sub client data
self.fetch_app_stats(self.rmqsub)
return self.result_parser()
def run_test(self, first_run=True):
self.start_init()
if hasattr(self, 'sub_app_ip_rep_port_map'):
# If Sub's have been launched Reset first
self.reset_all_app_stats(self.rmqsub)
# Launch the RabbitMQ pub
self.launch_rmq_pub()
# Launch RabbitMQ sub apps up to self.total_sub_apps
self.launch_rmq_sub()
# rerun the test
res = self.rerun_test(self.options)
return res
def boundary_resultfn(self, options, res):
message_rate = options.msg_rate
l.info("Completed run with message rate = %d and client count=%d " %
(message_rate, options.total_sub_apps * 10) +
"Reported Rate PUB:%f SUB:%f and Reported Drop Percentage : %f" %
(res['average_tx_rate'], res['average_rate'], res['average_packet_loss']))
l.info("\t\tCompleted-2: Pub-CPU:%3f%% PUB-TX:%.2fMbps PUB-RX:%.2fMbps " %
(res['pub_cpu'], res['pub_net_txrate'] / 1e6, res['pub_net_rxrate'] / 1e6))
run_pass = True
if (res['average_tx_rate'] < 0.7 * message_rate):
# if we are unable to get 70% of the tx rate
run_pass = False
return (run_pass, res['average_rate'], res['average_packet_loss'])
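# Worked example (numbers are assumed, for illustration only): with
# options.msg_rate = 10000 the boundary run passes only if the publisher
# achieved average_tx_rate >= 7000 (70% of the requested rate); a reported
# average_tx_rate of, say, 6500 would mark the run as failed.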
def stop_and_delete_all_apps(self):
self.delete_all_launched_apps()
def result_parser(self):
result = {
'client_count': 0,
'average_packets': 0,
'average_rate': 0,
'failing_clients': 0,
'average_packet_loss': 0
}
pub_data = self.apps[self.rmqpub]['stats'].values()[0]
msg_cnt_pub_tx = pub_data['msg_cnt']
bad_clients = 0
client_rate = 0
bad_client_rate = 0
clients_packet_count = 0
stats = self.get_app_stats(self.rmqsub)
num_subs = len(stats)
for client in stats.keys():
info = stats[client]
# l.info(" CLIENT = " + pformat(client) + " DATA = " + pformat(info))
client_rate += info['rate']
clients_packet_count += info['msg_cnt']
if info['msg_cnt'] != msg_cnt_pub_tx:
l.info("[%s] Count Mismatch Info: %s" % (client, pformat(info)))
bad_clients += 1
bad_client_rate += info['rate']
if bad_clients > 0:
l.info("Total number of clients experiencing packet drop = %d out of %d clients" %
(bad_clients, num_subs))
l.info('Average rate seen at the failing clients %f' % (bad_client_rate / bad_clients))
else:
l.info("No client experienced packet drops out of %d clients" % num_subs)
l.info("Total packet's send by PUB:%d and average packets received by client:%d" %
(msg_cnt_pub_tx, clients_packet_count / num_subs))
l.info('Average rate seen at the pub %f and at clients %f' %
(pub_data['rate'], (client_rate / num_subs)))
result['client_count'] = num_subs
result['packet_tx'] = msg_cnt_pub_tx
result['average_packets'] = clients_packet_count / result['client_count']
result['average_rate'] = client_rate / result['client_count']
result['failing_clients'] = bad_clients
result['average_tx_rate'] = pub_data['rate']
if bad_clients:
result['failing_clients_rate'] = (bad_client_rate / bad_clients)
result['average_packet_loss'] = \
((msg_cnt_pub_tx - (1.0 * clients_packet_count / result['client_count'])) * 100.0 / msg_cnt_pub_tx)
if 'cpu:start' in pub_data:
pub_total_cpu = (pub_data['cpu:end'][0] + pub_data['cpu:end'][1] -
(pub_data['cpu:start'][0] + pub_data['cpu:start'][1]))
else:
pub_total_cpu = 0
pub_total_time = pub_data['time:end'] - pub_data['time:start']
if 'net:start' in pub_data:
pub_total_nw_txbytes = pub_data['net:end'][0] - pub_data['net:start'][0]
pub_total_nw_rxbytes = pub_data['net:end'][1] - pub_data['net:start'][1]
else:
pub_total_nw_rxbytes = pub_total_nw_txbytes = 0
result['pub_cpu'] = 100.0 * pub_total_cpu / pub_total_time
result['pub_net_txrate'] = pub_total_nw_txbytes / pub_total_time
result['pub_net_rxrate'] = pub_total_nw_rxbytes / pub_total_time
l.debug(" RESULTS on TEST = " + pformat(result))
return result
def launch_rmq_pub(self):
l.info("Launching the RabbitMQ pub app")
constraints = [self.app_constraints(field='hostname', operator='UNIQUE')]
# Use cluster0 for launching the PUB
if 0 in self.mesos_cluster:
constraints.append(self.app_constraints(field=self.mesos_cluster[0]['cat'],
operator='CLUSTER', value=self.mesos_cluster[0]['match']))
self.create_hydra_app(name=self.rmqpub, app_path='hydra.rmqtest.rmq_pub.run',
app_args='%s %s %s' % (self.options.test_duration,
self.options.msg_batch,
self.options.msg_rate),
cpus=0.01, mem=32,
ports=[0],
constraints=constraints)
ipm = self.get_app_ipport_map(self.rmqpub)
assert(len(ipm) == 1)
self.pub_ip = ipm.values()[0][1]
self.pub_rep_taskport = str(ipm.values()[0][0])
l.info("[rmq_pub] RMQ pub server running at [%s]", self.pub_ip)
l.info("[rmq_pub] RMQ REP server running at [%s:%s]", self.pub_ip, self.pub_rep_taskport)
# Init RMQPubAnalyser
self.ha_pub = RMQPubAnalyser(self.pub_ip, self.pub_rep_taskport, ipm.keys()[0])
def launch_rmq_sub(self):
l.info("Launching the sub app")
self.total_app_groups = self.options.total_sub_apps / self.options.apps_in_group
self.options.total_sub_apps = self.options.total_sub_apps / self.options.apps_in_group
constraints = []
# Use cluster 1 for launching the SUB
if 1 in self.mesos_cluster:
constraints.append(self.app_constraints(field=self.mesos_cluster[1]['cat'],
operator='CLUSTER', value=self.mesos_cluster[1]['match']))
self.create_hydra_app(name=self.rmqsub, app_path='hydra.rmqtest.rmq_sub.run10',
app_args='%s' % (self.pub_ip),
cpus=0.01, mem=32,
ports=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
constraints=constraints)
self.scale_sub_app()
def scale_sub_app(self):
self.scale_app(self.rmqsub, self.options.total_sub_apps)
def delete_all_launched_apps(self):
l.info("Deleting all launched apps")
l.info("Deleting PUB")
self.delete_app(self.rmqpub)
l.info("Deleting SUBs")
self.delete_app(self.rmqsub)
class RunTest(object):
def __init__(self, argv):
usage = ('python %prog --test_duration=<time to run test> --msg_batch=<msg burst batch before sleep>'
' --msg_rate=<rate in packet per secs> --total_sub_apps=<Total sub apps to launch>'
' --config_file=<path_to_config_file> --keep_running')
parser = OptionParser(description='RabbitMQ scale test master',
version="0.1", usage=usage)
parser.add_option("--test_duration", dest='test_duration', type='float', default=10)
parser.add_option("--msg_batch", dest='msg_batch', type='int', default=100)
parser.add_option("--msg_rate", dest='msg_rate', type='float', default=10000)
parser.add_option("--total_sub_apps", dest='total_sub_apps', type='int', default=20)
parser.add_option("--apps_in_group", dest='apps_in_group', type='int', default=10)
parser.add_option("--config_file", dest='config_file', type='string', default='hydra.ini')
parser.add_option("--keep_running", dest='keep_running', action="store_true", default=False)
(options, args) = parser.parse_args()
if ((len(args) != 0)):
parser.print_help()
sys.exit(1)
r = RunTestRMQ(options, False)
r.start_appserver()
res = r.run_test()
r.delete_all_launched_apps()
print("RES = " + pformat(res))
if not options.keep_running:
r.stop_appserver()
else:
print("Keep running is set: Leaving the app server running")
print(" you can use the marathon gui/cli to scale the app up.")
print(" after you are done press enter on this window")
input('>')
r.stop_appserver()
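# Hypothetical invocation of this test driver (argument values are examples
# only, not taken from the original source):
#   python runtest.py --test_duration=30 --msg_batch=200 --msg_rate=5000 \
#       --total_sub_apps=40 --apps_in_group=10 --config_file=hydra.ini --keep_running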
| {
"repo_name": "lake-lerna/hydra",
"path": "src/main/python/hydra/rmqtest/runtest.py",
"copies": "4",
"size": "12277",
"license": "apache-2.0",
"hash": 4800181781085176000,
"line_mean": 43.321299639,
"line_max": 111,
"alpha_frac": 0.5710678505,
"autogenerated": false,
"ratio": 3.5037100456621006,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.60747778961621,
"avg_score": null,
"num_lines": null
} |
__author__ = 'AbdullahS'
import time
import logging
import os
import psutil
import pika
import json
from pprint import pprint, pformat # NOQA
from hydra.lib import util
from hydra.lib.hdaemon import HDaemonRepSrv
l = util.createlogger('HPub', logging.INFO)
class HDRmqpRepSrv(HDaemonRepSrv):
def __init__(self, port, run_data, pub_metrics):
self.run_data = run_data
self.pub_metrics = pub_metrics
self.init_pub_metrics()
HDaemonRepSrv.__init__(self, port)
self.register_fn('teststart', self.test_start)
self.register_fn('getstats', self.get_stats)
self.register_fn('teststatus', self.test_status)
self.register_fn('updateconfig', self.update_config)
def test_start(self):
process = psutil.Process()
self.run_data['start'] = True
self.run_data['test_status'] = 'running'
self.run_data['stats'] = {'net:start': json.dumps(psutil.net_io_counters()),
'cpu:start': json.dumps(process.cpu_times()),
'mem:start': json.dumps(process.memory_info()),
'time:start': json.dumps(time.time())}
return ('ok', None)
def get_stats(self):
l.info("Sending Stats:" + pformat(self.run_data['stats']))
return ('ok', self.run_data['stats'])
def test_status(self):
return ('ok', self.run_data['test_status'])
def init_pub_metrics(self):
l.info("Init PUB metrics...")
self.test_duration = self.pub_metrics['test_duration']
self.msg_batch = self.pub_metrics['msg_batch']
self.msg_requested_rate = self.pub_metrics['msg_requested_rate']
def update_config(self, test_duration, msg_batch, msg_requested_rate):
self.test_duration = float(test_duration)
self.msg_batch = int(msg_batch)
self.msg_requested_rate = float(msg_requested_rate)
l.info("PUB updated metrics: test_duration=%f, msg_batch=%f, msg_requested_rate=%f", self.test_duration,
self.msg_batch, self.msg_requested_rate)
return ('ok', None)
def run(argv):
if len(argv) > 3:
test_duration = argv[1]
msg_batch = argv[2]
msg_requested_rate = argv[3]
msg_batch = int(msg_batch)
msg_requested_rate = float(msg_requested_rate)
test_duration = float(test_duration)
# init and Rabbitmq bind pub server, type = 'fanout'
# For more info on types supported
# https://www.rabbitmq.com/tutorials/amqp-concepts.html
l.info("Starting RabbitMQ PUB server at")
credentials = pika.PlainCredentials('hydra', 'hydra')
r_pub_conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost', credentials=credentials))
channel = r_pub_conn.channel()
channel.exchange_declare(exchange='pub', type='fanout')
# init simple Rep server, this is used to listen
# for the signal to start sending data
pub_rep_port = os.environ.get('PORT0')
l.info("Starting RabbitMQ REP server at port [%s]", pub_rep_port)
run_data = {'start': False,
'stats': {'rate': 0, 'msg_cnt': 0},
'test_status': 'stopped'}
pub_metrics = {'test_duration': test_duration,
'msg_batch': msg_batch,
'msg_requested_rate': msg_requested_rate}
hd = HDRmqpRepSrv(pub_rep_port, run_data, pub_metrics)
hd.run()
while True:
if not run_data['start']:
l.debug("PUB WAITING FOR SIGNAL")
time.sleep(1)
continue
l.info("PUB server initiating test_duration [%f] messages, with batches [%d] with msg rate[%f]",
hd.test_duration, hd.msg_batch, hd.msg_requested_rate)
cnt = 0
msg_cnt = 0
start_time = time.time()
while True:
messagedata = "msg%d" % msg_cnt
message = "%d %s" % (msg_cnt, messagedata)
channel.basic_publish(exchange='pub', routing_key='', body=message)
# l.info(message)
cnt += 1
msg_cnt += 1
if cnt >= hd.msg_batch:
# compute the delay
duration = time.time() - start_time
expected_time = msg_cnt / hd.msg_requested_rate
delay = 0.0
if expected_time > duration:
delay = expected_time - duration
if delay > 1:
delay = 1
time.sleep(delay)
cnt = 0
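# Illustrative pacing arithmetic (assumed numbers): with
# msg_requested_rate = 10000 msgs/sec, after msg_cnt = 5000 messages the
# expected_time is 5000 / 10000 = 0.5s; if only 0.3s have elapsed so far the
# loop sleeps for the 0.2s difference (capped at 1s) before the next batch.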
elapsed_time = time.time() - start_time
if elapsed_time >= hd.test_duration:
break
run_data['stats']['time:end'] = json.dumps(time.time())
run_data['stats']['rate'] = msg_cnt / elapsed_time
run_data['stats']['msg_cnt'] = msg_cnt
process = psutil.Process()
run_data['stats']['net:end'] = json.dumps(psutil.net_io_counters())
run_data['stats']['cpu:end'] = json.dumps(process.cpu_times())
run_data['stats']['mem:end'] = json.dumps(process.memory_info())
run_data['test_status'] = 'stopping'
# Go back to waiting for the next test
run_data['start'] = False
continue
r_pub_conn.close()
l.info("PUB Server stopping after sending %d messages elapsed time %f and message rate %f" %
(msg_cnt, elapsed_time, run_data['stats']['rate']))
break
| {
"repo_name": "kratos7/hydra",
"path": "src/main/python/hydra/rmqtest/rmq_pub.py",
"copies": "4",
"size": "5421",
"license": "apache-2.0",
"hash": -4117551571402022400,
"line_mean": 39.1555555556,
"line_max": 112,
"alpha_frac": 0.5784910533,
"autogenerated": false,
"ratio": 3.6702775897088693,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006220034571466482,
"num_lines": 135
} |
__author__ = 'AbdullahS, sushil'
from pprint import pprint, pformat # NOQA
import zmq
import logging
import json
import traceback
from hydra.lib import util
from hydra.lib.utility.h_threading import HThreading
from hydra.lib import hdaemon_pb2
l = util.createlogger('HDaemon', logging.INFO)
# l.setLevel(logging.DEBUG)
class HDaemonRepSrv(object):
def __init__(self, port):
l.info("initiated... REP port[%s]", port)
self.port = port
self.data = {} # Dict calling class can use to store data, can be fetched later
self.t_exceptions = []
self.h_threading = HThreading()
self.cbfn = {}
def thread_cb(self, t_exceptions):
for exception in t_exceptions:
self.t_exceptions.append(exception)
l.error(exception)
def run(self):
l.info("spawning run thread...")
self.register_fn('ping', self.ping_task)
self.register_fn('teststart', self.test_start)
self.register_fn('teststop', self.test_stop)
self.register_fn('getstats', self.get_stats)
self.register_fn('resetstats', self.reset_stats)
self.register_fn('teststatus', self.test_status)
self.register_fn('updateconfig', self.update_config)
self.h_threading.start_thread(self.thread_cb, self.start)
def register_fn(self, token, fn):
l.debug("Registering function for [%s]" % token)
if token in self.cbfn:
l.info('token [%s] is already registered' % token)
self.cbfn[token] = fn
def ping_task(self):
return ('ok', 'pong')
def test_start(self, **kwargs):
l.info('TEST START NEEDS TO BE IMPLEMENTED')
return ('ok', None)
def test_stop(self):
l.info('TEST STOP NEEDS TO BE IMPLEMENTED')
return ('ok', None)
def test_status(self):
l.info('TEST STATUS NEEDS TO BE IMPLEMENTED')
return ('ok', None)
def get_stats(self):
l.info('GET STATS NEEDS TO BE IMPLEMENTED')
return ('ok', None)
def reset_stats(self):
l.info('RESET STATS NEEDS TO BE IMPLEMENTED')
return ('ok', None)
def update_config(self, **kwargs):
l.info('UPDATE CONFIG NEEDS TO BE IMPLEMENTED')
return ('ok', None)
def send_response(self, status, msg):
self.socket.send(json.dumps([status, msg]))
def start(self):
l.info("Binding zmq REP socket...")
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REP)
self.socket.bind("tcp://*:%s" % self.port)
l.info("Done Binding zmq REP socket...")
msg = hdaemon_pb2.CommandMessage()
rmsg = hdaemon_pb2.ResponseMessage()
while True:
# Wait for next request from client
raw_message = self.socket.recv()
msg.ParseFromString(raw_message)
# message = json.loads(raw_message)
l.info("Received request: [%s]", str(msg.type))
# Stop and return
if msg.type == hdaemon_pb2.CommandMessage.STOP:
l.info("Stopping.")
self.stop()
return
elif ((msg.type == hdaemon_pb2.CommandMessage.SUBCMD) and
(not msg.HasField("cmd") or msg.cmd.cmd_name not in self.cbfn)):
if msg.HasField("cmd"):
msg = "UNKNOWN message [%s] received... " % msg.cmd.cmd_name
else:
msg = "UNKNOWN message with not cmd field received... "
status_code = 'error'
l.error(msg)
self.send_response(status_code, msg)
else:
fn = self.cbfn[msg.cmd.cmd_name]
kwargs = {}
for targ in msg.cmd.argument:
if targ.HasField("intValue"):
kwargs[targ.name] = targ.intValue
elif targ.HasField("floatValue"):
kwargs[targ.name] = targ.floatValue
elif targ.HasField("strValue"):
kwargs[targ.name] = targ.strValue
else:
kwargs[targ.name] = True
try:
sts, msghash_t = fn(**kwargs)
except:
sts = 'exception'
msghash_t = traceback.format_exc()
rmsg.Clear()
rmsg.status = sts
if type(msghash_t) is dict:
msghash = msghash_t
else:
msghash = {}
msghash['__r'] = msghash_t
if msghash:
for key in msghash:
r = rmsg.resp.add()
r.name = str(key)
if type(msghash[key]) is int:
r.intValue = msghash[key]
elif type(msghash[key]) is float:
r.floatValue = msghash[key]
else:
r.strValue = str(msghash[key])
l.info("Sending Response STATUS=" + pformat(sts) + " MSG=" + pformat(msghash))
# self.send_response(sts, msg)
self.socket.send(rmsg.SerializeToString())
def stop(self):
l.info("Received stop signal, closing socket")
self.socket.close()
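# Minimal usage sketch (illustrative only; the subclass and handler names are
# made up and not part of the original module):
#
#   class MyRepSrv(HDaemonRepSrv):
#       def __init__(self, port):
#           HDaemonRepSrv.__init__(self, port)
#           self.register_fn('echo', self.echo)
#       def echo(self, text=""):
#           # handlers return a (status, payload) tuple
#           return ('ok', {'echo': text})
#
#   srv = MyRepSrv(5555)
#   srv.run()   # registers the default handlers and starts the REP loop thread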
| {
"repo_name": "kratos7/hydra",
"path": "src/main/python/hydra/lib/hdaemon.py",
"copies": "4",
"size": "5381",
"license": "apache-2.0",
"hash": 2527723556447115300,
"line_mean": 35.3581081081,
"line_max": 94,
"alpha_frac": 0.5261103884,
"autogenerated": false,
"ratio": 4.012677106636838,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6538787495036839,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
from base import DocumentWrapper
###############################################################################
# Generic Cluster Class
###############################################################################
class Cluster(DocumentWrapper):
###########################################################################
# Constructor and other init methods
###########################################################################
def __init__(self, cluster_document):
DocumentWrapper.__init__(self, cluster_document)
self._members = self._resolve_members("members")
###########################################################################
def _resolve_members(self, member_prop):
member_documents = self.get_property(member_prop)
members = []
# if members are not set then return
member_type = self.get_member_type()
if member_documents:
for mem_doc in member_documents:
member = member_type(mem_doc)
members.append(member)
return members
###########################################################################
def get_member_type(self):
raise Exception("Should be implemented by subclasses")
###########################################################################
# Properties
###########################################################################
def get_description(self):
return self.get_property("description")
###########################################################################
def get_members(self):
return self._members
###########################################################################
def get_repl_key(self):
return self.get_property("replKey")
###########################################################################
def has_member_server(self, server):
return self.get_member_for(server) is not None
###########################################################################
def get_member_for(self, server):
for member in self.get_members():
if (member.get_server() and
member.get_server().id == server.id):
return member
return None
###########################################################################
def get_status(self):
"""
Needs to be overridden
"""
###########################################################################
def get_default_server(self):
"""
Needs to be overridden
"""
###########################################################################
def get_mongo_uri_template(self, db=None):
if not db:
if self.get_repl_key():
db = "[/<dbname>]"
else:
db = ""
else:
db = "/" + db
server_uri_templates = []
for member in self.get_members():
server = member.get_server()
server_uri_templates.append(server.get_address_display())
creds = "[<dbuser>:<dbpass>@]" if self.get_repl_key() else ""
return ("mongodb://%s%s%s" % (creds, ",".join(server_uri_templates),
db))
def get_cluster_name(self):
str_name = self.get_property("_id")
if not str_name:
str_name = "NoName"
return str_name
@property
def get_members_info(self):
"""
Needs to be overridden
@return:
"""
pass | {
"repo_name": "richardxx/mongoctl-service",
"path": "build/lib.linux-x86_64-2.7/mongoctl/objects/cluster.py",
"copies": "1",
"size": "3635",
"license": "mit",
"hash": -6839899916959995000,
"line_mean": 33.3018867925,
"line_max": 79,
"alpha_frac": 0.3639614856,
"autogenerated": false,
"ratio": 5.844051446945338,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009383242036924325,
"num_lines": 106
} |
__author__ = 'abdul'
from base import DocumentWrapper
###############################################################################
# Generic Cluster Class
###############################################################################
class Cluster(DocumentWrapper):
###########################################################################
# Constructor and other init methods
###########################################################################
def __init__(self, cluster_document):
DocumentWrapper.__init__(self, cluster_document)
self._members = self._resolve_members("members")
###########################################################################
def _resolve_members(self, member_prop):
member_documents = self.get_property(member_prop)
members = []
# if members are not set then return
member_type = self.get_member_type()
if member_documents:
for mem_doc in member_documents:
member = member_type(mem_doc)
members.append(member)
return members
###########################################################################
def get_member_type(self):
raise Exception("Should be implemented by subclasses")
###########################################################################
# Properties
###########################################################################
def get_description(self):
return self.get_ignore_str_property("description")
###########################################################################
def get_members(self):
return self._members
###########################################################################
def get_servers(self):
servers = []
for member in self.get_members():
if member.get_server():
servers.append(member.get_server())
return servers
###########################################################################
def get_members_info(self):
info = []
for member in self.get_members():
server = member.get_server()
if server is not None:
info.append(server.id)
else:
info.append("<Invalid Member>")
return info
###########################################################################
def get_repl_key(self):
return self.get_property("replKey")
###########################################################################
def has_member_server(self, server):
return self.get_member_for(server) is not None
###########################################################################
def get_member_for(self, server):
for member in self.get_members():
if (member.get_server() and
member.get_server().id == server.id):
return member
return None
###########################################################################
def get_status(self):
"""
Needs to be overridden
"""
###########################################################################
def get_default_server(self):
"""
Needs to be overridden
"""
###########################################################################
def is_auth(self):
if self.get_repl_key():
return True
else:
auth_servers = filter(lambda s: s.is_auth(), self.get_servers())
return auth_servers and len(auth_servers) > 0
###########################################################################
def get_mongo_uri_template(self, db=None):
auth = self.is_auth()
if not db:
if auth:
db = "/<dbname>"
else:
db = ""
else:
db = "/" + db
server_uri_templates = []
for server in self.get_servers():
if server.is_cluster_connection_member():
server_uri_templates.append(server.get_address_display())
creds = "<dbuser>:<dbpass>@" if auth else ""
return "mongodb://%s%s%s?replicaSet=%s" % (creds, ",".join(server_uri_templates), db, self.id)
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/objects/cluster.py",
"copies": "1",
"size": "4318",
"license": "mit",
"hash": -5232055353074801000,
"line_mean": 34.1056910569,
"line_max": 102,
"alpha_frac": 0.3675312645,
"autogenerated": false,
"ratio": 5.890859481582537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6758390746082538,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
from bson.son import SON
from mongoctl.utils import (
document_pretty_string, wait_for, kill_process, is_pid_alive
)
from mongoctl.mongoctl_logging import *
from mongoctl.errors import MongoctlException
import mongoctl.repository as repository
import mongoctl.objects.server
from start import server_stopped_predicate
from mongoctl.prompt import prompt_execute_task
###############################################################################
# Constants
###############################################################################
MAX_SHUTDOWN_WAIT = 45
###############################################################################
# stop command
###############################################################################
def stop_command(parsed_options):
server_id = parsed_options.server
server = repository.lookup_and_validate_server(server_id)
username = None
password = None
if hasattr(parsed_options, "username"):
username = parsed_options.username
if hasattr(parsed_options, "password"):
password = parsed_options.password
stop_server(server, force=parsed_options.forceStop, username=username, password=password)
###############################################################################
# stop server
###############################################################################
def stop_server(server, force=False, username=None, password=None):
if not username is None and not password is None:
# We first authenticate to the admin database
server.get_db(dbname="admin", username=username, password=password)
do_stop_server(server, force)
###############################################################################
def do_stop_server(server, force=False):
# ensure that the stop was issued locally. Fail otherwise
server.validate_local_op("stop")
log_info("Checking to see if server '%s' is actually running before"
" stopping it..." % server.id)
# init local flags
can_stop_mongoly = True
shutdown_success = False
status = server.get_status()
if not status['connection']:
if "timedOut" in status:
log_info("Unable to issue 'shutdown' command to server '%s'. "
"The server is not responding (connection timed out) "
"although port %s is open, possibly for mongo* process." %
(server.id, server.get_port()))
can_stop_mongoly = False
else:
log_info("Server '%s' is not running." %
server.id)
return
pid = server.get_pid()
pid_disp = pid if pid else "[Cannot be determined]"
log_info("Stopping server '%s' (pid=%s)..." %
(server.id, pid_disp))
# log server activity stop
server.log_server_activity("stop")
# TODO: Enable this again when stabilized
# step_down_if_needed(server, force)
if can_stop_mongoly:
log_verbose(" ... issuing db 'shutdown' command ... ")
shutdown_success = mongo_stop_server(server, pid, force=False)
if not can_stop_mongoly or not shutdown_success:
log_verbose(" ... taking more forceful measures ... ")
shutdown_success = \
prompt_or_force_stop_server(server, pid, force,
try_mongo_force=can_stop_mongoly)
if shutdown_success:
log_info("Server '%s' has stopped." % server.id)
else:
raise MongoctlException("Unable to stop server '%s'." %
server.id)
###############################################################################
def step_down_if_needed(server, force):
## if server is a primary replica member then step down
if server.is_primary():
if force:
step_server_down(server, force)
else:
prompt_step_server_down(server, force)
###############################################################################
def mongo_stop_server(server, pid, force=False):
try:
shutdown_cmd = SON([('shutdown', 1), ('force', force)])
log_info("\nSending the following command to %s:\n%s\n" %
(server.get_connection_address(),
document_pretty_string(shutdown_cmd)))
server.disconnecting_db_command(shutdown_cmd, "admin")
log_info("Will now wait for server '%s' to stop." % server.id)
# Check that the server has stopped
stop_pred = server_stopped_predicate(server, pid)
wait_for(stop_pred, timeout=MAX_SHUTDOWN_WAIT)
if not stop_pred():
log_error("Shutdown command failed...")
return False
else:
return True
except Exception, e:
log_exception(e)
log_error("Failed to gracefully stop server '%s'. Cause: %s" %
(server.id, e))
return False
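# For reference (illustrative): the shutdown command document built above is
# simply {"shutdown": 1, "force": false} ("force": true for a forced stop); a
# SON is used so the command key comes first when sent to the server.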
###############################################################################
def force_stop_server(server, pid, try_mongo_force=True):
success = False
# try mongo force stop if server is still online
if server.is_online() and try_mongo_force:
success = mongo_stop_server(server, pid, force=True)
if not success or not try_mongo_force:
success = kill_stop_server(server, pid)
return success
###############################################################################
def kill_stop_server(server, pid):
if pid is None:
log_error("Cannot forcibly stop the server because the server's process"
" ID cannot be determined; pid file '%s' does not exist." %
server.get_pid_file_path())
return False
log_info("Forcibly stopping server '%s'...\n" % server.id)
log_info("Sending kill -1 (HUP) signal to server '%s' (pid=%s)..." %
(server.id, pid))
kill_process(pid, force=False)
log_info("Will now wait for server '%s' (pid=%s) to die." %
(server.id, pid))
wait_for(pid_dead_predicate(pid), timeout=MAX_SHUTDOWN_WAIT)
if is_pid_alive(pid):
log_error("Failed to kill server process with -1 (HUP).")
log_info("Sending kill -9 (SIGKILL) signal to server"
"'%s' (pid=%s)..." % (server.id, pid))
kill_process(pid, force=True)
log_info("Will now wait for server '%s' (pid=%s) to die." %
(server.id, pid))
wait_for(pid_dead_predicate(pid), timeout=MAX_SHUTDOWN_WAIT)
if not is_pid_alive(pid):
log_info("Forcefully-stopped server '%s'." % server.id)
return True
else:
log_error("Forceful stop of server '%s' failed." % server.id)
return False
###############################################################################
def prompt_or_force_stop_server(server, pid,
force=False, try_mongo_force=True):
if force:
return force_stop_server(server, pid,
try_mongo_force=try_mongo_force)
def stop_func():
return force_stop_server(server, pid,
try_mongo_force=try_mongo_force)
if try_mongo_force:
result = prompt_execute_task("Issue the shutdown with force command?",
stop_func)
else:
result = prompt_execute_task("Forcefully stop the server process?",
stop_func)
return result[1]
###############################################################################
def step_server_down(server, force=False):
log_info("Stepping down server '%s'..." % server.id)
try:
cmd = SON([('replSetStepDown', 10), ('force', force)])
server.disconnecting_db_command(cmd, "admin")
log_info("Server '%s' stepped down successfully!" % server.id)
return True
except Exception, e:
log_exception(e)
log_error("Failed to step down server '%s'. Cause: %s" %
(server.id, e))
return False
###############################################################################
def prompt_step_server_down(server, force):
def step_down_func():
step_server_down(server, force)
return prompt_execute_task("Server '%s' is a primary server. "
"Step it down before proceeding to shutdown?" %
server.id,
step_down_func)
###############################################################################
def pid_dead_predicate(pid):
def pid_dead():
return not is_pid_alive(pid)
return pid_dead
| {
"repo_name": "richardxx/mongoctl-service",
"path": "mongoctl/commands/server/stop.py",
"copies": "2",
"size": "8674",
"license": "mit",
"hash": 7357226350246813000,
"line_mean": 36.7130434783,
"line_max": 93,
"alpha_frac": 0.514180309,
"autogenerated": false,
"ratio": 4.44137224782386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.595555255682386,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
from bson.son import SON
from mongoctl.utils import (
document_pretty_string, wait_for, kill_process, is_pid_alive
)
from mongoctl.mongoctl_logging import *
from mongoctl.errors import MongoctlException
import mongoctl.repository
import mongoctl.objects.server
from start import server_stopped_predicate
from mongoctl.prompt import prompt_execute_task
###############################################################################
# Constants
###############################################################################
MAX_SHUTDOWN_WAIT = 45
###############################################################################
# stop command
###############################################################################
def stop_command(parsed_options):
stop_server(parsed_options.server, force=parsed_options.forceStop,
port=parsed_options.port)
###############################################################################
# stop server
###############################################################################
def stop_server(server_id, force=False, port=None):
server = mongoctl.repository.lookup_and_validate_server(server_id)
# apply overrides to server's cmd options (in memory only)
if port:
options_override = {"port": port}
server.apply_cmd_options_overrides(options_override)
do_stop_server(server, force)
###############################################################################
def do_stop_server(server, force=False):
# ensure that the stop was issued locally. Fail otherwise
server.validate_local_op("stop")
log_info("Checking to see if server '%s' is actually running before"
" stopping it..." % server.id)
# init local flags
can_stop_mongoly = True
shutdown_success = False
status = server.get_status()
if not status['connection']:
if "timedOut" in status:
log_info("Unable to issue 'shutdown' command to server '%s'. "
"The server is not responding (connection timed out) "
"although port %s is open, possibly for mongod." %
(server.id, server.get_port()))
can_stop_mongoly = False
elif "error" in status and "SSL handshake failed" in status["error"]:
log_info("Unable to issue 'shutdown' command to server '%s'. "
"The server appears to be configured with SSL but is not "
"currently running with SSL (SSL handshake failed). "
"Try running mongoctl with --ssl-off." % server.id)
can_stop_mongoly = False
elif "error" in status and "connection closed" in status["error"]:
log_info("Unable to issue 'shutdown' command to server '%s'. "
"The server appears to have reached max # of connections." % server.id)
can_stop_mongoly = False
elif server.is_server_pid_alive():
log_info("Unable to issue 'shutdown' command to server '%s'. The server is not responding." % server.id)
can_stop_mongoly = False
else:
log_info("Server '%s' is not running." %
server.id)
return
pid = server.get_pid()
pid_disp = pid if pid else "[Cannot be determined]"
log_info("Stopping server '%s' (pid=%s)..." %
(server.id, pid_disp))
# log server activity stop
server.log_server_activity("stop")
# TODO: Enable this again when stabilized
# step_down_if_needed(server, force)
if can_stop_mongoly:
log_verbose(" ... issuing db 'shutdown' command ... ")
shutdown_success = mongo_stop_server(server, pid, force=False)
if not can_stop_mongoly or not shutdown_success:
log_verbose(" ... taking more forceful measures ... ")
shutdown_success = \
prompt_or_force_stop_server(server, pid, force,
try_mongo_force=can_stop_mongoly)
if shutdown_success:
log_info("Server '%s' has stopped." % server.id)
else:
raise MongoctlException("Unable to stop server '%s'." %
server.id)
###############################################################################
def step_down_if_needed(server, force):
## if server is a primary replica member then step down
if server.is_primary():
if force:
step_server_down(server, force)
else:
prompt_step_server_down(server, force)
###############################################################################
def mongo_stop_server(server, pid, force=False):
try:
shutdown_cmd = SON([('shutdown', 1), ('force', force)])
log_info("\nSending the following command to %s:\n%s\n" %
(server.get_connection_address(),
document_pretty_string(shutdown_cmd)))
server.disconnecting_db_command(shutdown_cmd, "admin")
log_info("Will now wait for server '%s' to stop." % server.id)
# Check that the server has stopped
stop_pred = server_stopped_predicate(server, pid)
wait_for(stop_pred,timeout=MAX_SHUTDOWN_WAIT)
if not stop_pred():
log_error("Shutdown command failed...")
return False
else:
return True
except Exception, e:
log_exception(e)
log_error("Failed to gracefully stop server '%s'. Cause: %s" %
(server.id, e))
return False
###############################################################################
def force_stop_server(server, pid, try_mongo_force=True):
success = False
# try mongo force stop if server is still online
if server.is_online() and try_mongo_force:
success = mongo_stop_server(server, pid, force=True)
if not success or not try_mongo_force:
success = kill_stop_server(server, pid)
return success
###############################################################################
def kill_stop_server(server, pid):
if pid is None:
log_error("Cannot forcibly stop the server because the server's process"
" ID cannot be determined; pid file '%s' does not exist." %
server.get_pid_file_path())
return False
log_info("Sending kill -9 (SIGKILL) signal to server '%s' (pid=%s)..." % (server.id, pid))
kill_process(pid, force=True)
log_info("Will now wait for server '%s' (pid=%s) to die." % (server.id, pid))
wait_for(pid_dead_predicate(pid), timeout=MAX_SHUTDOWN_WAIT)
if not is_pid_alive(pid):
log_info("Forcefully-stopped server '%s'." % server.id)
return True
else:
log_error("Forceful stop of server '%s' failed." % server.id)
return False
###############################################################################
def prompt_or_force_stop_server(server, pid,
force=False, try_mongo_force=True):
if force:
return force_stop_server(server, pid,
try_mongo_force=try_mongo_force)
def stop_func():
return force_stop_server(server, pid,
try_mongo_force=try_mongo_force)
if try_mongo_force:
result = prompt_execute_task("Issue the shutdown with force command?",
stop_func)
else:
result = prompt_execute_task("Kill the server process?",
stop_func)
return result[1]
###############################################################################
def step_server_down(server, force=False):
log_info("Stepping down server '%s'..." % server.id)
try:
cmd = SON( [('replSetStepDown', 10),('force', force)])
server.disconnecting_db_command(cmd, "admin")
log_info("Server '%s' stepped down successfully!" % server.id)
return True
except Exception, e:
log_exception(e)
log_error("Failed to step down server '%s'. Cause: %s" %
(server.id, e))
return False
###############################################################################
def prompt_step_server_down(server, force):
def step_down_func():
step_server_down(server, force)
return prompt_execute_task("Server '%s' is a primary server. "
"Step it down before proceeding to shutdown?" %
server.id,
step_down_func)
###############################################################################
def pid_dead_predicate(pid):
def pid_dead():
return not is_pid_alive(pid)
return pid_dead
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/commands/server/stop.py",
"copies": "1",
"size": "8780",
"license": "mit",
"hash": -3062294088784132000,
"line_mean": 37.8495575221,
"line_max": 116,
"alpha_frac": 0.5128701595,
"autogenerated": false,
"ratio": 4.497950819672131,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5510820979172131,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
from distutils2.version import NormalizedVersion, suggest_normalized_version
from errors import MongoctlException
# Version support stuff
MIN_SUPPORTED_VERSION = "1.8"
###############################################################################
# MongoDBEdition (enum)
###############################################################################
class MongoDBEdition():
COMMUNITY = "community"
COMMUNITY_SSL = "community_ssl"
ENTERPRISE = "enterprise"
ALL = [COMMUNITY, COMMUNITY_SSL, ENTERPRISE]
###############################################################################
# MongoDBVersionInfo class
# we had to inherit and override __str__ because the suggest_normalized_version
# method does not maintain the release candidate version properly
###############################################################################
class MongoDBVersionInfo(NormalizedVersion):
def __init__(self, version_number, edition=None):
sugg_ver = suggest_normalized_version(version_number)
super(MongoDBVersionInfo,self).__init__(sugg_ver)
self.version_number = version_number
self.edition = edition or MongoDBEdition.COMMUNITY
###########################################################################
def __str__(self):
return "%s (%s)" % (self.version_number, self.edition)
###########################################################################
def __eq__(self, other):
return (other is not None and
self.equals_ignore_edition(other) and
self.edition == other.edition)
###########################################################################
def equals_ignore_edition(self, other):
return super(MongoDBVersionInfo, self).__eq__(other)
###############################################################################
def is_valid_version_info(version_info):
return (is_valid_version(version_info.version_number) and
version_info.edition in MongoDBEdition.ALL)
###############################################################################
def is_valid_version(version_number):
return suggest_normalized_version(version_number) is not None
###############################################################################
# returns true if version is greater or equal to 1.8
def is_supported_mongo_version(version_number):
return (make_version_info(version_number)>=
make_version_info(MIN_SUPPORTED_VERSION))
###############################################################################
def make_version_info(version_number, edition=None):
if version_number is None:
return None
version_number = version_number.strip()
version_number = version_number.replace("-pre-" , "-pre")
version_info = MongoDBVersionInfo(version_number, edition=edition)
# validate version string
if not is_valid_version_info(version_info):
raise MongoctlException("Invalid version '%s." % version_info)
else:
return version_info
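# Illustrative usage (version strings below are examples):
#   v1 = make_version_info("2.0.6")
#   v2 = make_version_info("2.4.9", edition=MongoDBEdition.ENTERPRISE)
#   v1 < v2                              # True; compares the normalized versions
#   v1 == make_version_info("2.0.6")     # True only because the editions also match
#   is_supported_mongo_version("1.6")    # False: below MIN_SUPPORTED_VERSION ("1.8")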
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/mongodb_version.py",
"copies": "1",
"size": "3066",
"license": "mit",
"hash": -1201776706138043400,
"line_mean": 39.3421052632,
"line_max": 79,
"alpha_frac": 0.4993476843,
"autogenerated": false,
"ratio": 5.0344827586206895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.603383044292069,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
from mongoctl.utils import document_pretty_string
###############################################################################
# Document Wrapper Class
###############################################################################
class DocumentWrapper(object):
###########################################################################
# Constructor
###########################################################################
def __init__(self, document):
self.__document__ = document
###########################################################################
# Overridden Methods
###########################################################################
def __str__(self):
return document_pretty_string(self.__document__)
###########################################################################
def get_document(self):
return self.__document__
###########################################################################
# Properties
###########################################################################
def get_property(self, property_name):
return self.__document__.get(property_name)
###########################################################################
def set_property(self, name, value):
self.__document__[name] = value
###########################################################################
def get_ignore_str_property(self, name):
val = self.get_property(name)
if val:
val = val.encode('ascii', 'ignore')
return val
###########################################################################
@property
def id(self):
return self.get_property('_id')
@id.setter
def id(self, value):
self.set_property('_id', value)
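# Illustrative usage (the wrapped document below is made up):
#   doc = DocumentWrapper({"_id": "MyServer", "description": "test box"})
#   doc.id                           # -> "MyServer"
#   doc.get_property("description")  # -> "test box"
#   doc.set_property("port", 27017)
#   str(doc)                         # pretty-printed JSON of the wrapped document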
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/objects/base.py",
"copies": "1",
"size": "1864",
"license": "mit",
"hash": -6790554958325382000,
"line_mean": 35.5490196078,
"line_max": 79,
"alpha_frac": 0.2929184549,
"autogenerated": false,
"ratio": 7.1692307692307695,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.796214922413077,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
from mongoctl.utils import resolve_path
import server
###############################################################################
# CONSTANTS
###############################################################################
###############################################################################
# MongosServer Class
###############################################################################
class MongosServer(server.Server):
###########################################################################
# Constructor
###########################################################################
def __init__(self, server_doc):
super(MongosServer, self).__init__(server_doc)
###########################################################################
def export_cmd_options(self, options_override=None):
"""
Override!
:return:
"""
cmd_options = super(MongosServer, self).export_cmd_options(
options_override=options_override)
# Add configServers arg
cluster = self.get_validate_cluster()
config_addresses = ",".join(cluster.get_config_member_addresses())
cmd_options["configdb"] = config_addresses
return cmd_options
###########################################################################
# Properties
###########################################################################
def get_db_path(self):
dbpath = self.get_cmd_option("dbpath")
if not dbpath:
dbpath = super(MongosServer, self).get_server_home()
if not dbpath:
dbpath = server.DEFAULT_DBPATH
return resolve_path(dbpath)
def get_server_home(self):
"""
Override!
:return:
"""
home_dir = super(MongosServer, self).get_server_home()
if not home_dir:
home_dir = self.get_db_path()
return home_dir
| {
"repo_name": "richardxx/mongoctl-service",
"path": "mongoctl/objects/mongos.py",
"copies": "2",
"size": "1981",
"license": "mit",
"hash": 4509212096900290000,
"line_mean": 31.4754098361,
"line_max": 79,
"alpha_frac": 0.3740535083,
"autogenerated": false,
"ratio": 5.596045197740113,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6970098706040113,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
from pymongo import uri_parser, errors
###############################################################################
# mongo uri tool. Contains utility functions for dealing with mongo uris
###############################################################################
###############################################################################
# MongoUriWrapper
###############################################################################
class MongoUriWrapper:
"""
A Mongo URI wrapper that makes it easy to deal with mongo uris:
- Masks user/password on display (i.e. __str__()
"""
###########################################################################
# Constructor
###########################################################################
def __init__(self, uri_obj):
self._uri_obj = uri_obj
###########################################################################
@property
def raw_uri(self):
return self._get_uri(mask=False)
###########################################################################
@property
def member_raw_uri_list(self):
return self._get_member_uri_list(mask=False)
###########################################################################
@property
def masked_uri(self):
return self._get_uri(mask=True)
###########################################################################
@property
def member_masked_uri_list(self):
return self._get_member_uri_list(mask=True)
###########################################################################
@property
def database(self):
return self._uri_obj["database"]
@database.setter
def database(self, value):
self._uri_obj["database"] = value
###########################################################################
@property
def node_list(self):
return self._uri_obj["nodelist"]
###########################################################################
@property
def address(self):
return self.addresses[0]
###########################################################################
@property
def addresses(self):
addresses = []
for node in self.node_list:
address = "%s:%s" % (node[0], node[1])
addresses.append(address)
return addresses
###########################################################################
@property
def username(self):
return self._uri_obj["username"]
###########################################################################
@property
def password(self):
return self._uri_obj["password"]
###########################################################################
def is_cluster_uri(self):
return len(self.node_list) > 1
###########################################################################
def __str__(self):
return self.masked_uri
###########################################################################
def _get_uri(self, mask=False):
# build db string
db_str = "/%s" % self.database if self.database else ""
# build credentials string
if self.username:
if mask:
creds = "*****:*****@"
else:
creds = "%s:%s@" % (self.username, self.password)
else:
creds = ""
# build hosts string
address_str = ",".join(self.addresses)
return "mongodb://%s%s%s" % (creds, address_str, db_str)
###########################################################################
def _get_member_uri_list(self, mask=False):
# build db string
db_str = "%s" % self.database if self.database else ""
username = self.username
password = "****" if mask else self.password
# build credentials string
if username:
creds = "%s:%s@" % (username, password)
else:
creds = ""
# build hosts string
member_uris = []
for node in self.node_list:
address = "%s:%s" % (node[0], node[1])
mem_uri = "mongodb://%s%s/%s" % (creds, address, db_str)
member_uris.append(mem_uri)
return member_uris
###############################################################################
def parse_mongo_uri(uri):
try:
uri_obj = uri_parser.parse_uri(uri)
# validate uri
nodes = uri_obj["nodelist"]
for node in nodes:
host = node[0]
if not host:
raise Exception("URI '%s' is missing a host." % uri)
return MongoUriWrapper(uri_obj)
except errors.ConfigurationError, e:
raise Exception("Malformed URI '%s'. %s" % (uri, e))
except Exception, e:
raise Exception("Unable to parse mongo uri '%s'."
" Cause: %s" % (e, uri))
###############################################################################
def mask_mongo_uri(uri):
uri_wrapper = parse_mongo_uri(uri)
return uri_wrapper.masked_uri
###############################################################################
def is_mongo_uri(value):
try:
parse_mongo_uri(value)
return True
except Exception,e:
return False
###############################################################################
def is_cluster_mongo_uri(mongo_uri):
"""
Returns true if the specified mongo uri is a cluster connection
"""
return len(parse_mongo_uri(mongo_uri).node_list) > 1
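###############################################################################
# Editor's note: an illustrative sketch (not part of the original module) of
# the masking behaviour described in the MongoUriWrapper docstring above. The
# uri, hosts and credentials below are hypothetical.
def _example_mask_credentials():
    wrapper = parse_mongo_uri("mongodb://bob:secret@db1.example.com:27017,"
                              "db2.example.com:27017/mydb")
    print str(wrapper)                    # credentials masked as *****:*****
    print wrapper.raw_uri                 # un-masked uri, credentials included
    print wrapper.is_cluster_uri()        # True (more than one node)
    print wrapper.member_masked_uri_list  # one masked uri per node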
| {
"repo_name": "richardxx/mongoctl-service",
"path": "build/lib.linux-x86_64-2.7/mongoctl/mongo_uri_tools.py",
"copies": "3",
"size": "5653",
"license": "mit",
"hash": -8354100921785608000,
"line_mean": 32.2529411765,
"line_max": 79,
"alpha_frac": 0.3700689899,
"autogenerated": false,
"ratio": 5.378686964795433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007519132592022617,
"num_lines": 170
} |
__author__ = 'abdul'
from server import Server
###############################################################################
# CONSTANTS
###############################################################################
###############################################################################
# MongosServer Class
###############################################################################
class MongosServer(Server):
###########################################################################
# Constructor
###########################################################################
def __init__(self, server_doc):
super(MongosServer, self).__init__(server_doc)
###########################################################################
def export_cmd_options(self, options_override=None, standalone=False):
"""
Override!
:return:
"""
cmd_options = super(MongosServer, self).export_cmd_options(
options_override=options_override)
# Add configServers arg
cluster = self.get_validate_cluster()
cmd_options["configdb"] = cluster.get_config_db_address()
return cmd_options
###########################################################################
def is_cluster_connection_member(self):
return True | {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/objects/mongos.py",
"copies": "1",
"size": "1355",
"license": "mit",
"hash": 602501670587533300,
"line_mean": 32.9,
"line_max": 79,
"alpha_frac": 0.3276752768,
"autogenerated": false,
"ratio": 6.642156862745098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.74698321395451,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
from verlib import NormalizedVersion, suggest_normalized_version
# Version support stuff
MIN_SUPPORTED_VERSION = "1.8"
###############################################################################
# MongoctlNormalizedVersion class
# we had to inherit and override __str__ because the suggest_normalized_version
# method does not maintain the release candidate version properly
###############################################################################
class MongoctlNormalizedVersion(NormalizedVersion):
def __init__(self, version_str):
sugg_ver = suggest_normalized_version(version_str)
super(MongoctlNormalizedVersion,self).__init__(sugg_ver)
self.version_str = version_str
def __str__(self):
return self.version_str
###############################################################################
def is_valid_version(version_str):
return suggest_normalized_version(version_str) is not None
###############################################################################
# returns true if version is greater or equal to 1.8
def is_supported_mongo_version(version_str):
return (version_obj(version_str)>=
version_obj(MIN_SUPPORTED_VERSION))
###############################################################################
def version_obj(version_str):
if version_str is None:
return None
#clean version string
try:
version_str = version_str.strip()
version_str = version_str.replace("-pre-" , "-pre")
return MongoctlNormalizedVersion(version_str)
except Exception, e:
return None
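###############################################################################
# Editor's note: a small illustrative sketch (not part of the original module)
# showing how the helpers above are typically combined; the version strings
# are arbitrary examples. MongoctlNormalizedVersion.__str__ returns the
# original string, so release-candidate suffixes are preserved on display.
def _example_version_checks():
    assert is_valid_version("2.4.6")
    assert is_supported_mongo_version("2.4.6")        # >= MIN_SUPPORTED_VERSION
    assert version_obj("2.4.6") > version_obj("1.8")
    print version_obj("2.4.6")                        # prints the original string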
| {
"repo_name": "richardxx/mongoctl-service",
"path": "mongoctl/mongo_version.py",
"copies": "2",
"size": "1632",
"license": "mit",
"hash": -3471303163761897500,
"line_mean": 36.9534883721,
"line_max": 79,
"alpha_frac": 0.53125,
"autogenerated": false,
"ratio": 4.990825688073395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6522075688073394,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import mongoctl.repository as repository
from mongoctl.errors import MongoctlException
import mongoctl
###############################################################################
# print uri command
###############################################################################
def print_uri_command(parsed_options):
id = parsed_options.id
db = parsed_options.db
# check if the id is a server id
print "in print_uri_command:"
print "\t " + str(id)
uri = ""
server = repository.lookup_server(id)
if server:
print "\t As a server"
uri = server.get_mongo_uri_template(db=db)
print "\t " + str(uri)
else:
print "\t As a cluster"
cluster = repository.lookup_cluster(id)
if cluster:
uri = cluster.get_mongo_uri_template(db=db)
else:
print "\t Raise an error?"
raise MongoctlException("Cannot find a server or a cluster with"
" id '%s'" % id)
if mongoctl.is_service() is False:
print uri
return uri
| {
"repo_name": "richardxx/mongoctl-service",
"path": "mongoctl/commands/misc/print_uri.py",
"copies": "1",
"size": "1105",
"license": "mit",
"hash": 4761182202367542000,
"line_mean": 28.0789473684,
"line_max": 79,
"alpha_frac": 0.5022624434,
"autogenerated": false,
"ratio": 4.42,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006747638326585695,
"num_lines": 38
} |
__author__ = 'abdul'
import mongoctl.repository as repository
from mongoctl.utils import document_pretty_string
from mongoctl.mongoctl_logging import log_info
from mongoctl.objects.replicaset_cluster import ReplicaSetCluster
from mongoctl.errors import MongoctlException
###############################################################################
# configure cluster command
###############################################################################
def configure_cluster_command(parsed_options):
cluster_id = parsed_options.cluster
cluster = repository.lookup_and_validate_cluster(cluster_id)
if not isinstance(cluster, ReplicaSetCluster):
raise MongoctlException("Cluster '%s' is not a replicaset cluster" %
cluster.id)
force_primary_server_id = parsed_options.forcePrimaryServer
if parsed_options.dryRun:
dry_run_configure_cluster(cluster,
force_primary_server_id=
force_primary_server_id)
else:
configure_cluster(cluster,
force_primary_server_id=
force_primary_server_id)
###############################################################################
# ReplicaSetCluster Methods
###############################################################################
def configure_cluster(cluster, force_primary_server_id=None):
force_primary_server = None
# validate force primary
if force_primary_server_id:
force_primary_server = \
repository.lookup_and_validate_server(force_primary_server_id)
configure_replica_cluster(cluster,
force_primary_server=force_primary_server)
###############################################################################
def configure_replica_cluster(replica_cluster, force_primary_server=None):
replica_cluster.configure_replicaset(force_primary_server=
force_primary_server)
###############################################################################
def dry_run_configure_cluster(cluster, force_primary_server_id=None):
log_info("\n************ Dry Run ************\n")
db_command = None
force = force_primary_server_id is not None
if cluster.is_replicaset_initialized():
log_info("Replica set already initialized. "
"Making the replSetReconfig command...")
db_command = cluster.get_replicaset_reconfig_db_command(force=force)
else:
log_info("Replica set has not yet been initialized."
" Making the replSetInitiate command...")
db_command = cluster.get_replicaset_init_all_db_command()
log_info("Executing the following command on the current primary:")
log_info(document_pretty_string(db_command))
| {
"repo_name": "richardxx/mongoctl-service",
"path": "build/lib.linux-x86_64-2.7/mongoctl/commands/cluster/configure.py",
"copies": "3",
"size": "2825",
"license": "mit",
"hash": -7132978114343946000,
"line_mean": 43.140625,
"line_max": 79,
"alpha_frac": 0.5553982301,
"autogenerated": false,
"ratio": 4.896013864818024,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6951412094918025,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import mongoctl.repository as repository
import shutil
from mongoctl.mongoctl_logging import log_info
from mongoctl.errors import MongoctlException
from stop import do_stop_server
from start import do_start_server
###############################################################################
# re-sync secondary command
###############################################################################
def resync_secondary_command(parsed_options):
resync_secondary(parsed_options.server)
###############################################################################
def resync_secondary(server_id):
server = repository.lookup_and_validate_server(server_id)
server.validate_local_op("resync-secondary")
log_info("Checking if server '%s' is secondary..." % server_id)
# get the server status
status = server.get_status(admin=True)
if not status['connection']:
msg = ("Server '%s' does not seem to be running. For more details,"
" run 'mongoctl status %s'" % (server_id, server_id))
raise MongoctlException(msg)
elif 'error' in status:
msg = ("There was an error while connecting to server '%s' (error:%s)."
" For more details, run 'mongoctl status %s'" %
(server_id, status['error'], server_id))
raise MongoctlException(msg)
rs_state = None
if 'selfReplicaSetStatusSummary' in status:
rs_state = status['selfReplicaSetStatusSummary']['stateStr']
if rs_state not in ['SECONDARY', 'RECOVERING']:
msg = ("Server '%s' is not a secondary member or cannot be determined"
" as secondary (stateStr='%s'. For more details, run 'mongoctl"
" status %s'" % (server_id, rs_state, server_id))
raise MongoctlException(msg)
do_stop_server(server)
log_info("Deleting server's '%s' dbpath '%s'..." %
(server_id, server.get_db_path()))
shutil.rmtree(server.get_db_path())
do_start_server(server)
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/commands/server/resync_secondary.py",
"copies": "3",
"size": "2014",
"license": "mit",
"hash": -5144760799878899000,
"line_mean": 37,
"line_max": 79,
"alpha_frac": 0.5759682224,
"autogenerated": false,
"ratio": 4.195833333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6271801555733333,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import mongoctl.repository as repository
from mongoctl.commands.command_utils import (
extract_mongo_exe_options, get_mongo_executable, options_to_command_args,
VersionPreference
)
from mongoctl.mongoctl_logging import log_info, log_error
from mongoctl.mongo_uri_tools import is_mongo_uri, parse_mongo_uri
from mongoctl.errors import MongoctlException
from mongoctl.utils import call_command
from mongoctl.objects.server import Server
from mongoctl.objects.mongod import MongodServer
###############################################################################
# CONSTS
###############################################################################
SUPPORTED_MONGO_SHELL_OPTIONS = [
"shell",
"norc",
"quiet",
"eval",
"verbose",
"ipv6",
"port",
"ssl",
"sslCAFile",
]
###############################################################################
# connect command
###############################################################################
def connect_command(parsed_options):
shell_options = extract_mongo_shell_options(parsed_options)
open_mongo_shell_to(parsed_options.dbAddress,
username=parsed_options.username,
password=parsed_options.password,
shell_options=shell_options,
js_files=parsed_options.jsFiles)
###############################################################################
def extract_mongo_shell_options(parsed_args):
return extract_mongo_exe_options(parsed_args,
SUPPORTED_MONGO_SHELL_OPTIONS)
###############################################################################
# open_mongo_shell_to
###############################################################################
def open_mongo_shell_to(db_address,
username=None,
password=None,
shell_options=None,
js_files=None):
if is_mongo_uri(db_address):
open_mongo_shell_to_uri(db_address, username, password,
shell_options, js_files)
return
# db_address is an id string
id_path = db_address.split("/")
id = id_path[0]
database = id_path[1] if len(id_path) == 2 else None
server = repository.lookup_server(id)
if server:
open_mongo_shell_to_server(server, database, username, password,
shell_options, js_files)
return
# Maybe cluster?
cluster = repository.lookup_cluster(id)
if cluster:
open_mongo_shell_to_cluster(cluster, database, username, password,
shell_options, js_files)
return
# Unknown destination
raise MongoctlException("Unknown db address '%s'" % db_address)
###############################################################################
def open_mongo_shell_to_server(server,
database=None,
username=None,
password=None,
shell_options=None,
js_files=None):
repository.validate_server(server)
if not database:
if isinstance(server, MongodServer) and server.is_arbiter_server():
database = "local"
else:
database = "admin"
if username or server.needs_to_auth(database):
# authenticate and grab a working username/password
username, password = server.get_working_login(database, username,
password)
do_open_mongo_shell_to(server.get_connection_address(),
database=database,
username=username,
password=password,
server_version=server.get_mongo_version_info(),
shell_options=shell_options,
js_files=js_files,
ssl=server.use_ssl_client())
###############################################################################
def open_mongo_shell_to_cluster(cluster,
database=None,
username=None,
password=None,
shell_options=None,
js_files=None):
log_info("Locating default server for cluster '%s'..." % cluster.id)
default_server = cluster.get_default_server()
if default_server:
log_info("Connecting to server '%s'" % default_server.id)
open_mongo_shell_to_server(default_server,
database=database,
username=username,
password=password,
shell_options=shell_options,
js_files=js_files)
else:
log_error("No default server found for cluster '%s'" %
cluster.id)
###############################################################################
def open_mongo_shell_to_uri(uri,
username=None,
password=None,
shell_options=None,
js_files=None):
uri_wrapper = parse_mongo_uri(uri)
database = uri_wrapper.database
username = username if username else uri_wrapper.username
password = password if password else uri_wrapper.password
server_or_cluster = repository.build_server_or_cluster_from_uri(uri)
if isinstance(server_or_cluster, Server):
open_mongo_shell_to_server(server_or_cluster,
database=database,
username=username,
password=password,
shell_options=shell_options,
js_files=js_files)
else:
open_mongo_shell_to_cluster(server_or_cluster,
database=database,
username=username,
password=password,
shell_options=shell_options,
js_files=js_files)
###############################################################################
def do_open_mongo_shell_to(address,
database=None,
username=None,
password=None,
server_version=None,
shell_options=None,
js_files=None,
ssl=False):
# default database to admin
database = database if database else "admin"
shell_options = shell_options or {}
js_files = js_files or []
# override port if specified in --port
if "port" in shell_options:
address = "%s:%s" % (address.split(":")[0], shell_options["port"])
# remove port from options since passing address + port is disallowed in mongo
del shell_options["port"]
connect_cmd = [get_mongo_shell_executable(server_version),
"%s/%s" % (address, database)]
if username:
connect_cmd.extend(["-u",username, "-p"])
if password:
connect_cmd.extend([password])
# append shell options
if shell_options:
connect_cmd.extend(options_to_command_args(shell_options))
# append js files
if js_files:
connect_cmd.extend(js_files)
# ssl options
if ssl and "--ssl" not in connect_cmd:
connect_cmd.append("--ssl")
cmd_display = connect_cmd[:]
# mask user/password
if username:
cmd_display[cmd_display.index("-u") + 1] = "****"
if password:
cmd_display[cmd_display.index("-p") + 1] = "****"
log_info("Executing command: \n%s" % " ".join(cmd_display))
call_command(connect_cmd, bubble_exit_code=True)
###############################################################################
def get_mongo_shell_executable(server_version):
shell_exe = get_mongo_executable(server_version,
'mongo',
version_check_pref=VersionPreference.EXACT_OR_MINOR)
return shell_exe.path
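###############################################################################
# Editor's note: an illustrative sketch (not part of the original module) of
# the --port override performed in do_open_mongo_shell_to() above: the port in
# the target address is replaced and the option is then dropped, so the mongo
# shell is not given both host:port and --port. Values below are hypothetical.
def _example_port_override():
    address = "localhost:27017"
    shell_options = {"port": "40000", "quiet": True}
    address = "%s:%s" % (address.split(":")[0], shell_options["port"])
    del shell_options["port"]
    print address                                 # localhost:40000
    print options_to_command_args(shell_options)  # ['--quiet']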
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/commands/common/connect.py",
"copies": "1",
"size": "8503",
"license": "mit",
"hash": -6104325432770128000,
"line_mean": 37.3018018018,
"line_max": 89,
"alpha_frac": 0.4673644596,
"autogenerated": false,
"ratio": 5.061309523809523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6028673983409524,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import mongoctl.repository as repository
from mongoctl.commands.command_utils import (
extract_mongo_exe_options, get_mongo_executable, options_to_command_args,
VERSION_PREF_MAJOR_GE
)
from mongoctl.mongoctl_logging import log_info, log_error
from mongoctl.mongo_uri_tools import is_mongo_uri, parse_mongo_uri
from mongoctl.errors import MongoctlException
from mongoctl.utils import call_command
from mongoctl.objects.server import Server
from mongoctl.objects.mongod import MongodServer
###############################################################################
# CONSTS
###############################################################################
SUPPORTED_MONGO_SHELL_OPTIONS = [
"shell",
"norc",
"quiet",
"eval",
"verbose",
"ipv6",
]
###############################################################################
# connect command
###############################################################################
def connect_command(parsed_options):
shell_options = extract_mongo_shell_options(parsed_options)
open_mongo_shell_to(parsed_options.dbAddress,
username=parsed_options.username,
password=parsed_options.password,
shell_options=shell_options,
js_files=parsed_options.jsFiles)
###############################################################################
def extract_mongo_shell_options(parsed_args):
return extract_mongo_exe_options(parsed_args,
SUPPORTED_MONGO_SHELL_OPTIONS)
###############################################################################
# open_mongo_shell_to
###############################################################################
def open_mongo_shell_to(db_address,
username=None,
password=None,
shell_options={},
js_files=[]):
if is_mongo_uri(db_address):
open_mongo_shell_to_uri(db_address, username, password,
shell_options, js_files)
return
# db_address is an id string
id_path = db_address.split("/")
id = id_path[0]
database = id_path[1] if len(id_path) == 2 else None
server = repository.lookup_server(id)
if server:
open_mongo_shell_to_server(server, database, username, password,
shell_options, js_files)
return
# Maybe cluster?
cluster = repository.lookup_cluster(id)
if cluster:
open_mongo_shell_to_cluster(cluster, database, username, password,
shell_options, js_files)
return
# Unknown destination
raise MongoctlException("Unknown db address '%s'" % db_address)
###############################################################################
def open_mongo_shell_to_server(server,
database=None,
username=None,
password=None,
shell_options={},
js_files=[]):
repository.validate_server(server)
if not database:
if isinstance(server, MongodServer) and server.is_arbiter_server():
database = "local"
else:
database = "admin"
if username or server.needs_to_auth(database):
# authenticate and grab a working username/password
username, password = server.get_working_login(database, username,
password)
do_open_mongo_shell_to(server.get_connection_address(),
database,
username,
password,
server.get_mongo_version(),
shell_options,
js_files)
###############################################################################
def open_mongo_shell_to_cluster(cluster,
database=None,
username=None,
password=None,
shell_options={},
js_files=[]):
log_info("Locating default server for cluster '%s'..." % cluster.id)
default_server = cluster.get_default_server()
if default_server:
log_info("Connecting to server '%s'" % default_server.id)
open_mongo_shell_to_server(default_server,
database=database,
username=username,
password=password,
shell_options=shell_options,
js_files=js_files)
else:
log_error("No default server found for cluster '%s'" %
cluster.id)
###############################################################################
def open_mongo_shell_to_uri(uri,
username=None,
password=None,
shell_options={},
js_files=[]):
uri_wrapper = parse_mongo_uri(uri)
database = uri_wrapper.database
username = username if username else uri_wrapper.username
password = password if password else uri_wrapper.password
server_or_cluster = repository.build_server_or_cluster_from_uri(uri)
if isinstance(server_or_cluster, Server):
open_mongo_shell_to_server(server_or_cluster,
database=database,
username=username,
password=password,
shell_options=shell_options,
js_files=js_files)
else:
open_mongo_shell_to_cluster(server_or_cluster,
database=database,
username=username,
password=password,
shell_options=shell_options,
js_files=js_files)
###############################################################################
def do_open_mongo_shell_to(address,
database=None,
username=None,
password=None,
server_version=None,
shell_options={},
js_files=[]):
# default database to admin
database = database if database else "admin"
connect_cmd = [get_mongo_shell_executable(server_version),
"%s/%s" % (address, database)]
if username:
connect_cmd.extend(["-u",username, "-p"])
if password:
connect_cmd.extend([password])
# append shell options
if shell_options:
connect_cmd.extend(options_to_command_args(shell_options))
# append js files
if js_files:
connect_cmd.extend(js_files)
cmd_display = connect_cmd[:]
# mask user/password
if username:
cmd_display[cmd_display.index("-u") + 1] = "****"
if password:
cmd_display[cmd_display.index("-p") + 1] = "****"
log_info("Executing command: \n%s" % " ".join(cmd_display))
call_command(connect_cmd, bubble_exit_code=True)
###############################################################################
def get_mongo_shell_executable(server_version):
shell_exe = get_mongo_executable(server_version,
'mongo',
version_check_pref=VERSION_PREF_MAJOR_GE)
return shell_exe.path | {
"repo_name": "richardxx/mongoctl-service",
"path": "mongoctl/commands/common/connect.py",
"copies": "2",
"size": "7835",
"license": "mit",
"hash": 1232361655658958300,
"line_mean": 36.6730769231,
"line_max": 79,
"alpha_frac": 0.4534779834,
"autogenerated": false,
"ratio": 5.21290751829674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.666638550169674,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import mongoctl.repository as repository
from mongoctl.mongoctl_logging import log_info
from start import extract_server_options, do_start_server
from stop import do_stop_server
###############################################################################
# restart command
###############################################################################
def restart_command(parsed_options):
server_id = parsed_options.server
server = repository.lookup_and_validate_server(server_id)
options_override = extract_server_options(server, parsed_options)
restart_server(parsed_options.server, options_override)
###############################################################################
# restart server
###############################################################################
def restart_server(server_id, options_override=None):
server = repository.lookup_and_validate_server(server_id)
do_restart_server(server, options_override)
###############################################################################
def do_restart_server(server, options_override=None):
log_info("Restarting server '%s'..." % server.id)
if server.is_online():
do_stop_server(server)
else:
log_info("Server '%s' is not running." % server.id)
do_start_server(server, options_override)
| {
"repo_name": "richardxx/mongoctl-service",
"path": "build/lib.linux-x86_64-2.7/mongoctl/commands/server/restart.py",
"copies": "3",
"size": "1357",
"license": "mit",
"hash": -2757331736204144000,
"line_mean": 35.6756756757,
"line_max": 79,
"alpha_frac": 0.515843773,
"autogenerated": false,
"ratio": 4.916666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6932510439666667,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import mongoctl.repository as repository
from mongoctl.mongoctl_logging import *
from mongoctl.errors import MongoctlException
from mongoctl.utils import document_pretty_string
from mongoctl.objects.mongod import MongodServer
###############################################################################
# status command TODO: parsed?
###############################################################################
def status_command(parsed_options):
    # we need to print the status json to stdout so that it's separate from all
    # other messages that are printed on stderr. This is so scripts can read
    # the status json and parse it if needed
id = parsed_options.id
server = repository.lookup_server(id)
if server:
log_info("Status for server '%s':" % id)
status = server.get_status(admin=True)
else:
cluster = repository.lookup_cluster(id)
if cluster:
log_info("Status for cluster '%s':" % id)
status = cluster.get_status()
else:
raise MongoctlException("Cannot find a server or a cluster with"
" id '%s'" % id)
status_str = document_pretty_string(status)
stdout_log(status_str)
return status
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/commands/common/status.py",
"copies": "1",
"size": "1264",
"license": "mit",
"hash": -1982838262129137000,
"line_mean": 37.303030303,
"line_max": 79,
"alpha_frac": 0.5775316456,
"autogenerated": false,
"ratio": 4.596363636363637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5673895281963637,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import mongoctl.repository as repository
from mongoctl.mongo_uri_tools import is_mongo_uri, parse_mongo_uri
from mongoctl.utils import resolve_path
from mongoctl.mongoctl_logging import log_info , log_warning
from mongoctl.commands.command_utils import (
is_db_address, is_dbpath, extract_mongo_exe_options, get_mongo_executable,
options_to_command_args,
VERSION_PREF_EXACT_OR_MINOR
)
from mongoctl.errors import MongoctlException
from mongoctl.utils import call_command
from mongoctl.objects.server import Server
from mongoctl.mongo_version import version_obj, MongoctlNormalizedVersion
###############################################################################
# CONSTS
###############################################################################
SUPPORTED_MONGO_RESTORE_OPTIONS = [
"directoryperdb",
"journal",
"collection",
"ipv6",
"filter",
"objcheck",
"drop",
"oplogReplay",
"keepIndexVersion",
"verbose",
"authenticationDatabase"
]
###############################################################################
# restore command
###############################################################################
def restore_command(parsed_options):
# get and validate source/destination
source = parsed_options.source
destination = parsed_options.destination
is_addr = is_db_address(destination)
is_path = is_dbpath(destination)
if is_addr and is_path:
msg = ("Ambiguous destination value '%s'. Your destination matches"
" both a dbpath and a db address. Use prefix 'file://',"
" 'cluster://' or 'server://' to make it more specific" %
destination)
raise MongoctlException(msg)
elif not (is_addr or is_path):
raise MongoctlException("Invalid destination value '%s'. Destination has to be"
" a valid db address or dbpath." % destination)
restore_options = extract_mongo_restore_options(parsed_options)
if is_addr:
mongo_restore_db_address(destination,
source,
username=parsed_options.username,
password=parsed_options.password,
restore_options=restore_options)
else:
dbpath = resolve_path(destination)
mongo_restore_db_path(dbpath, source, restore_options=restore_options)
###############################################################################
# mongo_restore
###############################################################################
def mongo_restore_db_address(db_address,
source,
username=None,
password=None,
restore_options=None):
if is_mongo_uri(db_address):
mongo_restore_uri(db_address, source, username, password,
restore_options)
return
# db_address is an id string
id_path = db_address.split("/")
id = id_path[0]
database = id_path[1] if len(id_path) == 2 else None
server = repository.lookup_server(id)
if server:
mongo_restore_server(server, source, database=database,
username=username, password=password,
restore_options=restore_options)
return
else:
cluster = repository.lookup_cluster(id)
if cluster:
mongo_restore_cluster(cluster, source, database=database,
username=username, password=password,
restore_options=restore_options)
return
raise MongoctlException("Unknown db address '%s'" % db_address)
###############################################################################
def mongo_restore_db_path(dbpath, source, restore_options=None):
do_mongo_restore(source, dbpath=dbpath, restore_options=restore_options)
###############################################################################
def mongo_restore_uri(uri, source,
username=None,
password=None,
restore_options=None):
uri_wrapper = parse_mongo_uri(uri)
database = uri_wrapper.database
username = username if username else uri_wrapper.username
password = password if password else uri_wrapper.password
server_or_cluster = repository.build_server_or_cluster_from_uri(uri)
if isinstance(server_or_cluster, Server):
mongo_restore_server(server_or_cluster, source, database=database,
username=username, password=password,
restore_options=restore_options)
else:
mongo_restore_cluster(server_or_cluster, source, database=database,
username=username, password=password,
restore_options=restore_options)
###############################################################################
def mongo_restore_server(server, source,
database=None,
username=None,
password=None,
restore_options=None):
repository.validate_server(server)
# auto complete password if possible
if username:
if not password and database:
password = server.lookup_password(database, username)
if not password:
password = server.lookup_password("admin", username)
do_mongo_restore(source,
host=server.get_connection_host_address(),
port=server.get_port(),
database=database,
username=username,
password=password,
server_version=server.get_mongo_version(),
restore_options=restore_options)
###############################################################################
def mongo_restore_cluster(cluster, source,
database=None,
username=None,
password=None,
restore_options=None):
repository.validate_cluster(cluster)
log_info("Locating default server for cluster '%s'..." % cluster.id)
default_server = cluster.get_default_server()
if default_server:
log_info("Restoring default server '%s'" % default_server.id)
mongo_restore_server(default_server, source,
database=database,
username=username,
password=password,
restore_options=restore_options)
else:
raise MongoctlException("No default server found for cluster '%s'" %
cluster.id)
###############################################################################
def do_mongo_restore(source,
host=None,
port=None,
dbpath=None,
database=None,
username=None,
password=None,
server_version=None,
restore_options=None):
# create restore command with host and port
restore_cmd = [get_mongo_restore_executable(server_version)]
if host:
restore_cmd.extend(["--host", host])
if port:
restore_cmd.extend(["--port", str(port)])
# dbpath
if dbpath:
restore_cmd.extend(["--dbpath", dbpath])
# database
if database:
restore_cmd.extend(["-d", database])
# username and password
if username:
restore_cmd.extend(["-u", username, "-p"])
if password:
restore_cmd.append(password)
    # ignore the authenticationDatabase option if server_version is less than 2.4.0
if (restore_options and "authenticationDatabase" in restore_options and
server_version and
version_obj(server_version) < MongoctlNormalizedVersion("2.4.0")):
restore_options.pop("authenticationDatabase", None)
# append shell options
if restore_options:
restore_cmd.extend(options_to_command_args(restore_options))
# pass source arg
restore_cmd.append(source)
cmd_display = restore_cmd[:]
# mask user/password
if username:
cmd_display[cmd_display.index("-u") + 1] = "****"
if password:
cmd_display[cmd_display.index("-p") + 1] = "****"
# execute!
log_info("Executing command: \n%s" % " ".join(cmd_display))
call_command(restore_cmd, bubble_exit_code=True)
###############################################################################
def get_mongo_restore_executable(server_version):
restore_exe = get_mongo_executable(server_version,
'mongorestore',
version_check_pref=
VERSION_PREF_EXACT_OR_MINOR)
# Warn the user if it is not an exact match (minor match)
if server_version and version_obj(server_version) != restore_exe.version:
log_warning("Using mongorestore '%s' that does not exactly match"
"server version '%s'" % (restore_exe.version,
server_version))
return restore_exe.path
###############################################################################
def extract_mongo_restore_options(parsed_args):
return extract_mongo_exe_options(parsed_args,
SUPPORTED_MONGO_RESTORE_OPTIONS)
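###############################################################################
# Editor's note: an illustrative sketch (not part of the original module) of
# the version gate in do_mongo_restore() above: the authenticationDatabase
# option is only passed to mongorestore for servers >= 2.4.0. The version
# strings below are hypothetical examples.
def _example_auth_db_version_gate():
    restore_options = {"authenticationDatabase": "admin", "drop": True}
    server_version = "2.2.3"
    if version_obj(server_version) < MongoctlNormalizedVersion("2.4.0"):
        restore_options.pop("authenticationDatabase", None)
    print options_to_command_args(restore_options)   # ['--drop']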
| {
"repo_name": "richardxx/mongoctl-service",
"path": "mongoctl/commands/common/restore.py",
"copies": "2",
"size": "9719",
"license": "mit",
"hash": 6851981200272323000,
"line_mean": 36.96484375,
"line_max": 87,
"alpha_frac": 0.5164111534,
"autogenerated": false,
"ratio": 4.966274910577415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6482686063977415,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import os
import re
import mongoctl.repository as repository
from mongoctl.mongoctl_logging import *
from mongoctl import config
from mongoctl.errors import MongoctlException
from mongoctl.utils import is_exe, which, resolve_path, execute_command
from mongoctl.mongo_version import version_obj
from mongoctl.mongo_uri_tools import is_mongo_uri
###############################################################################
# CONSTS
###############################################################################
MONGO_HOME_ENV_VAR = "MONGO_HOME"
MONGO_VERSIONS_ENV_VAR = "MONGO_VERSIONS"
# VERSION CHECK PREFERENCE CONSTS
VERSION_PREF_EXACT = 0
VERSION_PREF_GREATER = 1
VERSION_PREF_MAJOR_GE = 2
VERSION_PREF_LATEST_STABLE = 3
VERSION_PREF_EXACT_OR_MINOR = 4
def extract_mongo_exe_options(parsed_args, supported_options):
options_extract = {}
# Iterating over parsed options dict
# Yeah in a hacky way since there is no clean documented way of doing that
# See http://bugs.python.org/issue11076 for more details
# this should be changed when argparse provides a cleaner way
for (option_name,option_val) in parsed_args.__dict__.items():
if option_name in supported_options and option_val is not None:
options_extract[option_name] = option_val
return options_extract
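###############################################################################
# Editor's note: an illustrative call (not part of the original module) of
# extract_mongo_exe_options() above, using a hypothetical argparse namespace.
def _example_extract_options():
    import argparse
    parsed = argparse.Namespace(quiet=True, port=40000, unrelated="x")
    # only non-None options named in the supported list are kept
    print extract_mongo_exe_options(parsed, ["quiet", "port", "eval"])
    # prints {'quiet': True, 'port': 40000}; the unrelated option is dropped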
###############################################################################
def get_mongo_executable(server_version,
executable_name,
version_check_pref=VERSION_PREF_EXACT):
mongo_home = os.getenv(MONGO_HOME_ENV_VAR)
mongo_installs_dir = config.get_mongodb_installs_dir()
ver_disp = "[Unspecified]" if server_version is None else server_version
log_verbose("Looking for a compatible %s for mongoVersion=%s." %
(executable_name, ver_disp))
exe_version_tuples = find_all_executables(executable_name)
if len(exe_version_tuples) > 0:
selected_exe = best_executable_match(executable_name,
exe_version_tuples,
server_version,
version_check_pref=
version_check_pref)
if selected_exe is not None:
log_info("Using %s at '%s' version '%s'..." %
(executable_name,
selected_exe.path,
selected_exe.version))
return selected_exe
## ok nothing found at all. wtf case
msg = ("Unable to find a compatible '%s' executable "
"for version %s. You may need to run 'mongoctl install-mongodb %s'"
" to install it.\n\n"
"Here is your enviroment:\n\n"
"$PATH=%s\n\n"
"$MONGO_HOME=%s\n\n"
"mongoDBInstallationsDirectory=%s (in mongoctl.config)" %
(executable_name, ver_disp, ver_disp,
os.getenv("PATH"),
mongo_home,
mongo_installs_dir))
raise MongoctlException(msg)
###############################################################################
def find_all_executables(executable_name):
# create a list of all available executables found and then return the best
# match if applicable
executables_found = []
####### Look in $PATH
path_executable = which(executable_name)
if path_executable is not None:
add_to_executables_found(executables_found, path_executable)
#### Look in $MONGO_HOME if set
mongo_home = os.getenv(MONGO_HOME_ENV_VAR)
if mongo_home is not None:
mongo_home = resolve_path(mongo_home)
mongo_home_exe = get_mongo_home_exe(mongo_home, executable_name)
add_to_executables_found(executables_found, mongo_home_exe)
# Look in mongod_installs_dir if set
mongo_installs_dir = config.get_mongodb_installs_dir()
if mongo_installs_dir is not None:
if os.path.exists(mongo_installs_dir):
for mongo_installation in os.listdir(mongo_installs_dir):
child_mongo_home = os.path.join(mongo_installs_dir,
mongo_installation)
child_mongo_exe = get_mongo_home_exe(child_mongo_home,
executable_name)
add_to_executables_found(executables_found, child_mongo_exe)
return get_exe_version_tuples(executables_found)
###############################################################################
def add_to_executables_found(executables_found, executable):
if is_valid_mongo_exe(executable):
if executable not in executables_found:
executables_found.append(executable)
else:
log_verbose("Not a valid executable '%s'. Skipping..." % executable)
###############################################################################
def best_executable_match(executable_name,
exe_version_tuples,
version_str,
version_check_pref=VERSION_PREF_EXACT):
version = version_obj(version_str)
match_func = exact_exe_version_match
exe_versions_str = exe_version_tuples_to_strs(exe_version_tuples)
log_verbose("Found the following %s's. Selecting best match "
"for version %s\n%s" %(executable_name, version_str,
exe_versions_str))
if version is None:
log_verbose("mongoVersion is null. "
"Selecting default %s" % executable_name)
match_func = default_match
elif version_check_pref == VERSION_PREF_LATEST_STABLE:
match_func = latest_stable_exe
elif version_check_pref == VERSION_PREF_MAJOR_GE:
match_func = major_ge_exe_version_match
elif version_check_pref == VERSION_PREF_EXACT_OR_MINOR:
match_func = exact_or_minor_exe_version_match
return match_func(executable_name, exe_version_tuples, version)
###############################################################################
def default_match(executable_name, exe_version_tuples, version):
default_exe = latest_stable_exe(executable_name, exe_version_tuples)
if default_exe is None:
log_verbose("No stable %s found. Looking for any latest available %s "
"..." % (executable_name, executable_name))
default_exe = latest_exe(executable_name, exe_version_tuples)
return default_exe
###############################################################################
def exact_exe_version_match(executable_name, exe_version_tuples, version):
for mongo_exe,exe_version in exe_version_tuples:
if exe_version == version:
return mongo_exe_object(mongo_exe, exe_version)
return None
###############################################################################
def latest_stable_exe(executable_name, exe_version_tuples, version=None):
log_verbose("Find the latest stable %s" % executable_name)
# find greatest stable exe
# hold values in a list of (exe,version) tuples
stable_exes = []
for mongo_exe,exe_version in exe_version_tuples:
# get the release number (e.g. A.B.C, release number is B here)
release_num = exe_version.parts[0][1]
# stable releases are the even ones
if (release_num % 2) == 0:
stable_exes.append((mongo_exe, exe_version))
return latest_exe(executable_name, stable_exes)
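###############################################################################
# Editor's note: an illustrative helper (not part of the original module)
# spelling out the "even release number" convention used by latest_stable_exe()
# above: for a version A.B.C, an even B (e.g. 2.4.x, 2.6.x) is a stable
# release and an odd B (e.g. 2.5.x) is a development release.
def _example_is_stable_release(version_str):
    release_num = version_obj(version_str).parts[0][1]
    return (release_num % 2) == 0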
###############################################################################
def latest_exe(executable_name, exe_version_tuples, version=None):
# Return nothing if nothing compatible
if len(exe_version_tuples) == 0:
return None
# sort desc by version
exe_version_tuples.sort(key=lambda t: t[1], reverse=True)
exe = exe_version_tuples[0]
return mongo_exe_object(exe[0], exe[1])
###############################################################################
def major_ge_exe_version_match(executable_name, exe_version_tuples, version):
    # find all compatible exes then return the closest match
    # (greatest compatible version)
# hold values in a list of (exe,version) tuples
compatible_exes = []
for mongo_exe,exe_version in exe_version_tuples:
if exe_version.parts[0][0] >= version.parts[0][0]:
compatible_exes.append((mongo_exe, exe_version))
# Return nothing if nothing compatible
if len(compatible_exes) == 0:
return None
# find the best fit
compatible_exes.sort(key=lambda t: t[1])
exe = compatible_exes[-1]
return mongo_exe_object(exe[0], exe[1])
###############################################################################
def exact_or_minor_exe_version_match(executable_name,
exe_version_tuples,
version):
"""
IF there is an exact match then use it
OTHERWISE try to find a minor version match
"""
exe = exact_exe_version_match(executable_name,
exe_version_tuples,
version)
if not exe:
exe = minor_exe_version_match(executable_name,
exe_version_tuples,
version)
return exe
###############################################################################
def minor_exe_version_match(executable_name,
exe_version_tuples,
version):
# hold values in a list of (exe,version) tuples
compatible_exes = []
for mongo_exe,exe_version in exe_version_tuples:
# compatible ==> major + minor equality
if (exe_version.parts[0][0] == version.parts[0][0] and
exe_version.parts[0][1] == version.parts[0][1]):
compatible_exes.append((mongo_exe, exe_version))
# Return nothing if nothing compatible
if len(compatible_exes) == 0:
return None
# find the best fit
compatible_exes.sort(key=lambda t: t[1])
exe = compatible_exes[-1]
return mongo_exe_object(exe[0], exe[1])
###############################################################################
def get_exe_version_tuples(executables):
exe_ver_tuples = []
for mongo_exe in executables:
try:
exe_version = mongo_exe_version(mongo_exe)
exe_ver_tuples.append((mongo_exe, exe_version))
except Exception, e:
log_exception(e)
log_verbose("Skipping executable '%s': %s" % (mongo_exe, e))
return exe_ver_tuples
###############################################################################
def exe_version_tuples_to_strs(exe_ver_tuples):
strs = []
for mongo_exe,exe_version in exe_ver_tuples:
strs.append("%s = %s" % (mongo_exe, exe_version))
return "\n".join(strs)
###############################################################################
def is_valid_mongo_exe(path):
return path is not None and is_exe(path)
###############################################################################
def get_mongo_home_exe(mongo_home, executable_name):
return os.path.join(mongo_home, 'bin', executable_name)
###############################################################################
def mongo_exe_version(mongo_exe):
try:
re_expr = "v?((([0-9]+)\.([0-9]+)\.([0-9]+))([^, ]*))"
vers_spew = execute_command([mongo_exe, "--version"])
# only take first line of spew
vers_spew = vers_spew.split('\n')[0]
vers_grep = re.findall(re_expr, vers_spew)
full_version = vers_grep[-1][0]
result = version_obj(full_version)
if result is not None:
return result
else:
raise MongoctlException("Cannot parse mongo version from the"
" output of '%s --version'" % mongo_exe)
except Exception, e:
log_exception(e)
raise MongoctlException("Unable to get mongo version of '%s'."
" Cause: %s" % (mongo_exe, e))
###############################################################################
class MongoExeObject():
pass
###############################################################################
def mongo_exe_object(exe_path, exe_version):
exe_obj = MongoExeObject()
exe_obj.path = exe_path
exe_obj.version = exe_version
return exe_obj
###############################################################################
def options_to_command_args(args):
command_args=[]
for (arg_name,arg_val) in sorted(args.iteritems()):
# append the arg name and val as needed
if not arg_val:
continue
elif arg_val == True:
command_args.append("--%s" % arg_name)
else:
command_args.append("--%s" % arg_name)
command_args.append(str(arg_val))
return command_args
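###############################################################################
# Editor's note: an illustrative call (not part of the original module) showing
# how options_to_command_args() above maps an options dict to command-line
# arguments: True becomes a bare flag, falsy values are skipped, anything else
# becomes "--name value".
def _example_options_to_args():
    print options_to_command_args({"port": 27017, "quiet": True, "verbose": False})
    # ['--port', '27017', '--quiet']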
###############################################################################
def is_server_or_cluster_db_address(value):
"""
checks if the specified value is in the form of
[server or cluster id][/database]
"""
# check if value is an id string
id_path = value.split("/")
id = id_path[0]
return len(id_path) <= 2 and (repository.lookup_server(id) or
repository.lookup_cluster(id))
###############################################################################
def is_db_address(value):
"""
Checks if the specified value is a valid mongoctl database address
"""
return value and (is_mongo_uri(value) or
is_server_or_cluster_db_address(value))
###############################################################################
def is_dbpath(value):
"""
Checks if the specified value is a dbpath. dbpath could be an absolute
file path, relative path or a file uri
"""
value = resolve_path(value)
return os.path.exists(value)
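###############################################################################
# Editor's note: an illustrative helper (not part of the original module)
# combining the destination-classification helpers above; the value passed in
# is hypothetical (a mongo uri, a configured server/cluster id, or a local path).
def _example_classify_destination(value):
    if is_db_address(value):
        print "'%s' looks like a db address (mongo uri or server/cluster id)" % value
    elif is_dbpath(value):
        print "'%s' looks like a local dbpath" % value
    else:
        print "'%s' is neither a db address nor an existing dbpath" % value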
| {
"repo_name": "richardxx/mongoctl-service",
"path": "build/lib.linux-x86_64-2.7/mongoctl/commands/command_utils.py",
"copies": "2",
"size": "14067",
"license": "mit",
"hash": 4543575385565772300,
"line_mean": 37.3297002725,
"line_max": 79,
"alpha_frac": 0.5242055875,
"autogenerated": false,
"ratio": 4.282191780821917,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5806397368321917,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import os
import stat
import subprocess
import re
import signal
import resource
import mongoctl.repository as repository
from mongoctl.commands.command_utils import (
options_to_command_args, extract_mongo_exe_options
)
from mongoctl.mongoctl_logging import *
from mongoctl.errors import MongoctlException
from mongoctl import users
from mongoctl.processes import(
communicate_to_child_process, create_subprocess, get_child_processes
)
from mongoctl.prompt import prompt_execute_task
from mongoctl.utils import (
ensure_dir, which, wait_for, dir_exists, is_pid_alive
)
from tail_log import tail_server_log, stop_tailing
from mongoctl.commands.command_utils import (
get_mongo_executable, VERSION_PREF_EXACT
)
from mongoctl.prompt import prompt_confirm
from mongoctl.objects.mongod import MongodServer
from mongoctl.objects.mongos import MongosServer
###############################################################################
# CONSTS
###############################################################################
# OS resource limits to impose on the 'mongod' process (see setrlimit(2))
PROCESS_LIMITS = [
# Many TCP/IP connections to mongod ==> many threads to handle them ==>
# RAM footprint of many stacks. Ergo, limit the stack size per thread:
('RLIMIT_STACK', "stack size (in bytes)", 1024 * 1024),
# Speaking of connections, we'd like to be able to have a lot of them:
('RLIMIT_NOFILE', "number of file descriptors", 65536)
]
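###############################################################################
# Editor's note: an illustrative sketch (not part of the original module) of
# how limits such as the ones above can be applied with resource.setrlimit();
# the hook actually used by this module is passed to create_subprocess() as
# preexec_fn further below. The function name here is hypothetical.
def _example_apply_process_limits():
    for (res_name, description, value) in PROCESS_LIMITS:
        res_id = getattr(resource, res_name)   # e.g. resource.RLIMIT_STACK
        soft, hard = resource.getrlimit(res_id)
        log_verbose("setting %s to %s (current soft=%s, hard=%s)" %
                    (description, value, soft, hard))
        try:
            resource.setrlimit(res_id, (value, hard))
        except ValueError, e:
            log_warning("could not set %s to %s: %s" % (description, value, e))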
###############################################################################
# start command
###############################################################################
def start_command(parsed_options):
# Obtain the server id from the command line
server_id = parsed_options.server
"""
Search the server specification in the configuration file
server is in the type of mongos or mongod
"""
server = repository.lookup_and_validate_server(server_id)
# Get options for this server from command line, which can override the options in the config file
options_override = extract_server_options(server, parsed_options)
rs_add = parsed_options.rsAdd or parsed_options.rsAddNoInit
if parsed_options.dryRun:
dry_run_start_server_cmd(server, options_override)
else:
start_server(server,
options_override=options_override,
rs_add=rs_add,
no_init=parsed_options.rsAddNoInit)
###############################################################################
def extract_server_options(server, parsed_args):
if isinstance(server, MongodServer):
return extract_mongo_exe_options(parsed_args, SUPPORTED_MONGOD_OPTIONS)
elif isinstance(server, MongosServer):
return extract_mongo_exe_options(parsed_args, SUPPORTED_MONGOS_OPTIONS)
###############################################################################
def dry_run_start_server_cmd(server, options_override=None):
# ensure that the start was issued locally. Fail otherwise
server.validate_local_op("start")
log_info("************ Dry Run ************\n")
start_cmd = generate_start_command(server, options_override)
start_cmd_str = " ".join(start_cmd)
log_info("\nCommand:")
log_info("%s\n" % start_cmd_str)
###############################################################################
# start server
###############################################################################
def start_server(server, options_override=None, rs_add=False, no_init=False):
do_start_server(server,
options_override=options_override,
rs_add=rs_add,
no_init=no_init)
###############################################################################
__mongod_pid__ = None
__current_server__ = None
###############################################################################
def do_start_server(server, options_override=None, rs_add=False, no_init=False):
# ensure that the start was issued locally. Fail otherwise
server.validate_local_op("start")
log_info("Checking to see if server '%s' is already running"
" before starting it..." % server.id)
status = server.get_status()
if status['connection']:
log_info("Server '%s' is already running." %
server.id)
return
elif "timedOut" in status:
raise MongoctlException("Unable to start server: Server '%s' seems to"
" be already started but is"
" not responding (connection timeout)."
" Or there might some server running on the"
" same port %s" %
(server.id, server.get_port()))
# check if there is another process running on the same port
elif "error" in status and ("closed" in status["error"] or
"reset" in status["error"] or
"ids don't match" in status["error"]):
raise MongoctlException("Unable to start server: Either server '%s' is "
"started but not responding or port %s is "
"already in use." %
(server.id, server.get_port()))
# do necessary work before starting the mongod/mongos process
_pre_server_start(server, options_override=options_override)
server.log_server_activity("start")
server_pid = start_server_process(server, options_override)
_post_server_start(server, server_pid, rs_add=rs_add, no_init=no_init)
# Note: The following block has to be the last block
# because server_process.communicate() will not return unless you
# interrupt the server process which will kill mongoctl, so nothing after
# this block will be executed. Almost never...
if not server.is_fork():
communicate_to_child_process(server_pid)
###############################################################################
def _pre_server_start(server, options_override=None):
if isinstance(server, MongodServer):
_pre_mongod_server_start(server, options_override=options_override)
###############################################################################
def _pre_mongod_server_start(server, options_override=None):
"""
Does necessary work before starting a server
1- An efficiency step for arbiters running with --no-journal
* there is a lock file ==>
* server must not have exited cleanly from last run, and does not know
how to auto-recover (as a journalled server would)
* however: this is an arbiter, therefore
* there is no need to repair data files in any way ==>
* i can rm this lockfile and start my server
"""
lock_file_path = server.get_lock_file_path()
no_journal = (server.get_cmd_option("nojournal") or
(options_override and "nojournal" in options_override))
if (os.path.exists(lock_file_path) and
server.is_arbiter_server() and
no_journal):
log_warning("WARNING: Detected a lock file ('%s') for your server '%s'"
" ; since this server is an arbiter, there is no need for"
" repair or other action. Deleting mongod.lock and"
" proceeding..." % (lock_file_path, server.id))
try:
os.remove(lock_file_path)
except Exception, e:
log_exception(e)
raise MongoctlException("Error while trying to delete '%s'. "
"Cause: %s" % (lock_file_path, e))
###############################################################################
def _post_server_start(server, server_pid, **kwargs):
if isinstance(server, MongodServer):
_post_mongod_server_start(server, server_pid, **kwargs)
###############################################################################
def _post_mongod_server_start(server, server_pid, **kwargs):
try:
# prepare the server
prepare_mongod_server(server)
maybe_config_server_repl_set(server, rs_add=kwargs.get("rs_add"),
no_init=kwargs.get("no_init"))
except Exception, e:
log_exception(e)
log_error("Unable to fully prepare server '%s'. Cause: %s \n"
"Stop server now if more preparation is desired..." %
(server.id, e))
shall_we_terminate(server_pid)
exit(1)
###############################################################################
def prepare_mongod_server(server):
"""
Contains post start server operations
"""
log_info("Preparing server '%s' for use as configured..." %
server.id)
# setup the local users
users.setup_server_local_users(server)
if not server.is_cluster_member() or server.is_config_server():
users.setup_server_users(server)
###############################################################################
def shall_we_terminate(mongod_pid):
def killit():
utils.kill_process(mongod_pid, force=True)
log_info("Server process terminated at operator behest.")
(condemned, _) = prompt_execute_task("Kill server now?", killit)
return condemned
###############################################################################
def maybe_config_server_repl_set(server, rs_add=False, no_init=False):
# if the server belongs to a replica set cluster,
# then prompt the user to init the replica set IF not already initialized
# AND server is NOT an Arbiter
# OTHERWISE prompt to add server to replica if server is not added yet
cluster = server.get_replicaset_cluster()
if cluster is not None:
log_verbose("Server '%s' is a member in the configuration for"
" cluster '%s'." % (server.id,cluster.id))
if not cluster.is_replicaset_initialized():
log_info("Replica set cluster '%s' has not been initialized yet." %
cluster.id)
if cluster.get_member_for(server).can_become_primary():
if not no_init:
if rs_add:
cluster.initialize_replicaset(server)
else:
prompt_init_replica_cluster(cluster, server)
else:
log_warning("Replicaset is not initialized and you "
"specified --rs-add-nonit. Not adding to "
"replicaset...")
else:
log_info("Skipping replica set initialization because "
"server '%s' cannot be elected primary." %
server.id)
else:
log_verbose("No need to initialize cluster '%s', as it has"
" already been initialized." % cluster.id)
if not cluster.is_member_configured_for(server):
if rs_add:
cluster.add_member_to_replica(server)
else:
prompt_add_member_to_replica(cluster, server)
else:
log_verbose("Server '%s' is already added to the replicaset"
" conf of cluster '%s'." %
(server.id, cluster.id))
###############################################################################
def prompt_init_replica_cluster(replica_cluster,
suggested_primary_server):
prompt = ("Do you want to initialize replica set cluster '%s' using "
"server '%s'?" %
(replica_cluster.id, suggested_primary_server.id))
def init_repl_func():
replica_cluster.initialize_replicaset(suggested_primary_server)
prompt_execute_task(prompt, init_repl_func)
###############################################################################
def prompt_add_member_to_replica(replica_cluster, server):
prompt = ("Do you want to add server '%s' to replica set cluster '%s'?" %
(server.id, replica_cluster.id))
def add_member_func():
replica_cluster.add_member_to_replica(server)
prompt_execute_task(prompt, add_member_func)
###############################################################################
def _start_server_process_4real(server, options_override=None):
mk_server_home_dir(server)
# if the pid file is not created yet then this is the first time this
# server is started (or at least by mongoctl)
    first_time = not os.path.exists(server.get_pid_file_path())
# generate key file if needed
if server.needs_repl_key():
get_generate_key_file(server)
# create the start command line
start_cmd = generate_start_command(server, options_override)
start_cmd_str = " ".join(start_cmd)
first_time_msg = " for the first time" if first_time else ""
log_info("Starting server '%s'%s..." % (server.id, first_time_msg))
log_info("\nExecuting command:\n%s\n" % start_cmd_str)
child_process_out = None
if server.is_fork():
child_process_out = subprocess.PIPE
global __mongod_pid__
global __current_server__
parent_mongod = create_subprocess(start_cmd,
stdout=child_process_out,
preexec_fn=server_process_preexec)
if server.is_fork():
__mongod_pid__ = get_forked_mongod_pid(parent_mongod)
else:
__mongod_pid__ = parent_mongod.pid
__current_server__ = server
return __mongod_pid__
###############################################################################
def get_forked_mongod_pid(parent_mongod):
# Get stdout from subprocess
output = parent_mongod.communicate()[0]
pid_re_expr = "forked process: ([0-9]+)"
pid_str = re.search(pid_re_expr, output).groups()[0]
return int(pid_str)
###############################################################################
def start_server_process(server,options_override=None):
mongod_pid = _start_server_process_4real(server, options_override)
log_info("Will now wait for server '%s' to start up."
" Enjoy mongod's log for now!" %
server.id)
log_info("\n****************************************************************"
"***************")
log_info("* START: tail of log file at '%s'" % server.get_log_file_path())
log_info("******************************************************************"
"*************\n")
log_tailer = tail_server_log(server)
# wait until the server starts
try:
is_online = wait_for(server_started_predicate(server, mongod_pid),
timeout=300)
finally:
# stop tailing
stop_tailing(log_tailer)
log_info("\n****************************************************************"
"***************")
log_info("* END: tail of log file at '%s'" % server.get_log_file_path())
log_info("******************************************************************"
"*************\n")
if not is_online:
raise MongoctlException("Timed out waiting for server '%s' to start. "
"Please tail the log file to monitor further "
"progress." %
server.id)
log_info("Server '%s' started successfully! (pid=%s)\n" %
(server.id, mongod_pid))
return mongod_pid
###############################################################################
def server_process_preexec():
""" make the server ignore ctrl+c signals and have the global mongoctl
signal handler take care of it
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
_set_process_limits()
###############################################################################
def _set_process_limits():
for (res_name, description, desired_limit) in PROCESS_LIMITS :
_set_a_process_limit(res_name, desired_limit, description)
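# A hypothetical sketch of what the PROCESS_LIMITS constant (defined elsewhere
# in mongoctl) could look like; each entry is a (resource_name, description,
# desired_limit) triple consumed by the loop above, and the resource names must
# match attributes of the stdlib 'resource' module:
#
#   PROCESS_LIMITS = [
#       ("RLIMIT_NOFILE", "open file descriptors", 65536),
#       ("RLIMIT_NPROC", "processes/threads", 32000),
#   ]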
###############################################################################
def _set_a_process_limit(resource_name, desired_limit, description):
which_resource = getattr(resource, resource_name)
(soft, hard) = resource.getrlimit(which_resource)
def set_resource(attempted_value):
log_verbose("Trying setrlimit(resource.%s, (%d, %d))" %
(resource_name, attempted_value, hard))
resource.setrlimit(which_resource, (attempted_value, hard))
log_info("Setting OS limit on %s for process (desire up to %d)..."
"\n\t Current limit values: soft = %d, hard = %d" %
(description, desired_limit, soft, hard))
_negotiate_process_limit(set_resource, desired_limit, soft, hard)
log_info("Resulting OS limit on %s for process: " % description +
"soft = %d, hard = %d" % resource.getrlimit(which_resource))
###############################################################################
def _rlimit_min(one_val, nother_val):
"""Returns the more stringent rlimit value. -1 means no limit."""
if one_val < 0 or nother_val < 0 :
return max(one_val, nother_val)
else:
return min(one_val, nother_val)
###############################################################################
def _negotiate_process_limit(set_resource, desired_limit, soft, hard):
best_possible = _rlimit_min(hard, desired_limit)
worst_possible = soft
attempt = best_possible # be optimistic for initial attempt
while abs(best_possible - worst_possible) > 1 :
try:
set_resource(attempt)
log_verbose(" That worked! Should I negotiate further?")
worst_possible = attempt
except:
log_verbose(" Phooey. That didn't work.")
if attempt < 0 :
log_info("\tCannot remove soft limit on resource.")
return
best_possible = attempt + (1 if best_possible < attempt else -1)
attempt = (best_possible + worst_possible) / 2
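# Worked example with hypothetical numbers: starting from soft=1024, hard=65536
# and desired_limit=65536, the first attempt is 65536. If setrlimit() rejects
# it, best_possible drops to 65535 and the next attempt is (65535+1024)/2=33279;
# if that succeeds, worst_possible becomes 33279 and the next attempt is
# (65535+33279)/2=49407, and so on until the window narrows to one value,
# leaving the largest soft limit the OS will actually accept.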
###############################################################################
# MONGOD Start Command functions
###############################################################################
def generate_start_command(server, options_override=None):
"""
Check if we need to use numactl if we are running on a NUMA box.
10gen recommends using numactl on NUMA. For more info, see
http://www.mongodb.org/display/DOCS/NUMA
"""
command = []
if mongod_needs_numactl():
log_info("Running on a NUMA machine...")
command = apply_numactl(command)
# append the mongod executable
command.append(get_server_executable(server))
# create the command args
cmd_options = server.export_cmd_options(options_override=options_override)
command.extend(options_to_command_args(cmd_options))
return command
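# On a NUMA machine the generated command could look roughly like the following
# (executable path, dbpath and options are hypothetical; they come from the
# server's configuration via options_to_command_args()):
#
#   ['/usr/bin/numactl', '--interleave=all',
#    '/opt/mongodb/bin/mongod',
#    '--dbpath', '/data/mydb', '--port', '27017',
#    '--logpath', '/data/mydb/mongodb.log', '--fork']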
###############################################################################
def server_stopped_predicate(server, pid):
def server_stopped():
return (not server.is_online() and
(pid is None or not is_pid_alive(pid)))
return server_stopped
###############################################################################
def server_started_predicate(server, mongod_pid):
def server_started():
# check if the command failed
if not is_pid_alive(mongod_pid):
raise MongoctlException("Could not start the server. Please check"
" the log file.")
return server.is_online()
return server_started
###############################################################################
# NUMA Related functions
###############################################################################
def mongod_needs_numactl():
""" Logic kind of copied from MongoDB (mongodb-src/util/version.cpp) ;)
Return true IF we are on a box with a NUMA enabled kernel and more
than 1 numa node (they start at node0).
"""
return dir_exists("/sys/devices/system/node/node1")
###############################################################################
def apply_numactl(command):
numactl_exe = get_numactl_exe()
if numactl_exe:
log_info("Using numactl '%s'" % numactl_exe)
return [numactl_exe, "--interleave=all"] + command
else:
msg = ("You are running on a NUMA machine. It is recommended to run "
"your server using numactl but we cannot find a numactl "
"executable in your PATH. Proceeding might cause problems that"
" will manifest in strange ways, such as massive slow downs for"
" periods of time or high system cpu time. Proceed?")
        if not prompt_confirm(msg):
            exit(0)
        # operator chose to proceed without numactl; return the command as-is
        # so the caller can keep appending the mongod executable and options
        return command
###############################################################################
def get_numactl_exe():
return which("numactl")
###############################################################################
def mk_server_home_dir(server):
# ensure server home dir exists if it has one
server_dir = server.get_server_home()
if not server_dir:
return
log_verbose("Ensuring that server's home dir '%s' exists..." % server_dir)
if ensure_dir(server_dir):
log_verbose("server home dir %s already exists!" % server_dir)
else:
log_verbose("server home dir '%s' created successfully" % server_dir)
###############################################################################
def get_generate_key_file(server):
cluster = server.get_cluster()
key_file_path = server.get_key_file() or server.get_default_key_file_path()
# Generate the key file if it does not exist
if not os.path.exists(key_file_path):
key_file = open(key_file_path, 'w')
key_file.write(cluster.get_repl_key())
key_file.close()
# set the permissions required by mongod
os.chmod(key_file_path,stat.S_IRUSR)
return key_file_path
###############################################################################
def get_server_executable(server):
if isinstance(server, MongodServer):
return get_mongod_executable(server)
elif isinstance(server, MongosServer):
return get_mongos_executable(server)
###############################################################################
def get_mongod_executable(server):
mongod_exe = get_mongo_executable(server.get_mongo_version(),
'mongod',
version_check_pref=VERSION_PREF_EXACT)
return mongod_exe.path
###############################################################################
def get_mongos_executable(server):
mongos_exe = get_mongo_executable(server.get_mongo_version(),
'mongos',
version_check_pref=VERSION_PREF_EXACT)
return mongos_exe.path
###############################################################################
# SIGNAL HANDLER FUNCTIONS
###############################################################################
#TODO Remove this ugly signal handler and use something more elegant
def mongoctl_signal_handler(signal_val, frame):
global __mongod_pid__
# otherwise prompt to kill server
global __current_server__
def kill_child(child_process):
try:
if child_process.poll() is None:
log_verbose("Killing child process '%s'" % child_process )
child_process.terminate()
except Exception, e:
log_exception(e)
log_verbose("Unable to kill child process '%s': Cause: %s" %
(child_process, e))
def exit_mongoctl():
# kill all children then exit
map(kill_child, get_child_processes())
exit(0)
# if there is no mongod server yet then exit
if __mongod_pid__ is None:
exit_mongoctl()
else:
prompt_execute_task("Kill server '%s'?" % __current_server__.id,
exit_mongoctl)
###############################################################################
# Register the global mongoctl signal handler
signal.signal(signal.SIGINT, mongoctl_signal_handler)
###############################################################################
SUPPORTED_MONGOD_OPTIONS = [
"verbose",
"quiet",
"port",
"bind_ip",
"maxConns",
"objcheck",
"logpath",
"logappend",
"pidfilepath",
"keyFile",
"nounixsocket",
"unixSocketPrefix",
"auth",
"cpu",
"dbpath",
"diaglog",
"directoryperdb",
"journal",
"journalOptions",
"journalCommitInterval",
"ipv6",
"jsonp",
"noauth",
"nohttpinterface",
"nojournal",
"noprealloc",
"notablescan",
"nssize",
"profile",
"quota",
"quotaFiles",
"rest",
"repair",
"repairpath",
"slowms",
"smallfiles",
"syncdelay",
"sysinfo",
"upgrade",
"fastsync",
"oplogSize",
"master",
"slave",
"source",
"only",
"slavedelay",
"autoresync",
"replSet",
"configsvr",
"shardsvr",
"noMoveParanoia",
"setParameter"
]
###############################################################################
SUPPORTED_MONGOS_OPTIONS = [
"verbose",
"quiet",
"port",
"bind_ip",
"maxConns",
"logpath",
"logappend",
"pidfilepath",
"keyFile",
"nounixsocket",
"unixSocketPrefix",
"ipv6",
"jsonp",
"nohttpinterface",
"upgrade",
"setParameter",
"syslog",
"configdb",
"localThreshold",
"test",
"chunkSize",
"noscripting"
]
| {
"repo_name": "richardxx/mongoctl-service",
"path": "build/lib.linux-x86_64-2.7/mongoctl/commands/server/start.py",
"copies": "1",
"size": "26038",
"license": "mit",
"hash": 2312401157750846000,
"line_mean": 36.1971428571,
"line_max": 102,
"alpha_frac": 0.51555419,
"autogenerated": false,
"ratio": 4.4425865893192285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5458140779319228,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import os
import subprocess
import pwd
import time
import socket
import psutil
import urlparse
import json
from bson import json_util
from mongoctl_logging import *
from errors import MongoctlException
import signal
###############################################################################
def namespace_get_property(namespace, name):
if hasattr(namespace, name):
return getattr(namespace,name)
return None
###############################################################################
def to_string(thing):
return "" if thing is None else str(thing)
###############################################################################
def document_pretty_string(document):
return json.dumps(document, indent=4, default=json_util.default)
###############################################################################
def listify(object):
if isinstance(object, list):
return object
return [object]
###############################################################################
def is_url(value):
scheme = urlparse.urlparse(value).scheme
return scheme is not None and scheme != ''
###############################################################################
def wait_for(predicate, timeout=None, sleep_duration=2, grace=True):
start_time = now()
must_retry = may_retry = not predicate()
if must_retry and grace:
# optimizing for predicates whose first invocations may be slooooooow
log_verbose("GRACE: First eval finished in %d secs - resetting timer." %
(now() - start_time))
start_time = now()
while must_retry and may_retry:
must_retry = not predicate()
if must_retry:
net_time = now() - start_time
if timeout and net_time + sleep_duration > timeout:
may_retry = False
else:
left = "[-%d sec] " % (timeout - net_time) if timeout else ""
log_info("-- waiting %s--" % left)
time.sleep(sleep_duration)
return not must_retry
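# Hypothetical usage sketch for wait_for(): poll for a pid file for up to 30
# seconds, checking once per second (the path below is illustrative only):
#
#   started = wait_for(lambda: os.path.exists("/tmp/mongod.pid"),
#                      timeout=30, sleep_duration=1)
#   if not started:
#       raise MongoctlException("server did not start in time")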
###############################################################################
def now():
return time.time()
###############################################################################
# OS Functions
###############################################################################
def which(program):
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
###############################################################################
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
###############################################################################
def ensure_dir(dir_path):
"""
If DIR_PATH does not exist, makes it. Failing that, raises Exception.
Returns True if dir already existed; False if it had to be made.
"""
exists = dir_exists(dir_path)
if not exists:
try:
os.makedirs(dir_path)
except(Exception,RuntimeError), e:
raise Exception("Unable to create directory %s. Cause %s" %
(dir_path, e))
return exists
###############################################################################
def dir_exists(path):
return os.path.exists(path) and os.path.isdir(path)
###############################################################################
def list_dir_files(path):
return [name for name in os.listdir(path) if
os.path.isfile(os.path.join(path, name))]
###############################################################################
def resolve_path(path):
# handle file uris
path = path.replace("file://", "")
# expand vars
path = os.path.expandvars(custom_expanduser(path))
# Turn relative paths to absolute
try:
path = os.path.abspath(path)
except OSError, e:
# handle the case where cwd does not exist
if "No such file or directory" in str(e):
pass
else:
raise
return path
###############################################################################
def custom_expanduser(path):
if path.startswith("~"):
login = get_current_login()
home_dir = os.path.expanduser( "~%s" % login)
path = path.replace("~", home_dir, 1)
return path
###############################################################################
def get_current_login():
try:
pwuid = pwd.getpwuid(os.geteuid())
return pwuid.pw_name
except Exception, e:
raise Exception("Error while trying to get current os login. %s" % e)
###############################################################################
# sub-processing functions
###############################################################################
def call_command(command, bubble_exit_code=False, **kwargs):
try:
return subprocess.check_call(command, **kwargs)
except subprocess.CalledProcessError, e:
if bubble_exit_code:
exit(e.returncode)
else:
raise e
###############################################################################
def execute_command(command, **kwargs):
    # Python 2.7+ : Use the new method because I think it's better
if hasattr(subprocess, 'check_output'):
return subprocess.check_output(command,stderr=subprocess.STDOUT, **kwargs)
else: # Python 2.6 compatible, check_output is not available in 2.6
return subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**kwargs).communicate()[0]
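# Hypothetical usage sketch for the two helpers above: call_command() lets the
# child write to the inherited stdout/stderr and returns the exit status, while
# execute_command() captures and returns the combined output (paths and
# arguments below are illustrative only):
#
#   call_command(["tar", "-czf", "backup.tgz", "dump"], cwd="/tmp")
#   version_banner = execute_command(["mongod", "--version"])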
###############################################################################
def is_pid_alive(pid):
try:
os.kill(pid,0)
return True
except OSError:
return False
###############################################################################
def kill_process(pid, force=False):
sig = signal.SIGKILL if force else signal.SIGTERM
try:
os.kill(pid, sig)
return True
except OSError:
return False
###############################################################################
def kill_current_process_child_processes():
kill_all_child_processes(os.getpid())
###############################################################################
def kill_all_child_processes(pid):
try:
print "Killing process %s child processes" % pid
process = psutil.Process(pid=pid)
children = process.get_children()
if children:
for child in children:
print "Killing child process %s" % child.pid
kill_all_child_processes(child.pid)
else:
print "Process %s has no children" % pid
process.kill()
except Exception, ex:
log_debug("Error while killing all child processes for %s" % pid)
log_exception(ex)
###############################################################################
# HELPER functions
###############################################################################
def timedelta_total_seconds(td):
"""
Equivalent python 2.7+ timedelta.total_seconds()
    This was added for python 2.6 compatibility
"""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
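# Worked example: a timedelta of 1 minute 30.5 seconds has days=0, seconds=90
# and microseconds=500000, so this returns (500000 + 90 * 1e6) / 1e6 = 90.5,
# matching timedelta.total_seconds() on Python 2.7+.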
###############################################################################
def is_valid_member_address(address):
if address is None:
return False
host_port = address.split(":")
return (len(host_port) == 2
and host_port[0]
and host_port[1]
and str(host_port[1]).isdigit())
###############################################################################
# Network Utils Functions
###############################################################################
def is_host_local(host):
if (host == "localhost" or
host == "127.0.0.1"):
return True
return is_same_host(socket.gethostname(), host)
###############################################################################
def is_same_host(host1, host2):
"""
Returns true if host1 == host2 OR map to the same host (using DNS)
"""
try:
if host1 == host2:
return True
else:
ips1 = get_host_ips(host1)
ips2 = get_host_ips(host2)
return len(set(ips1) & set(ips2)) > 0
except Exception, ex:
log_exception(ex)
return False
###############################################################################
def is_same_address(addr1, addr2):
"""
Where the two addresses are in the host:port
Returns true if ports are equals and hosts are the same using is_same_host
"""
hostport1 = addr1.split(":")
hostport2 = addr2.split(":")
return (is_same_host(hostport1[0], hostport2[0]) and
hostport1[1] == hostport2[1])
###############################################################################
def get_host_ips(host):
try:
ips = []
try:
addr_info = socket.getaddrinfo(host, None)
# Can't resolve -> obviously has no IPs -> return default empty list
except socket.gaierror:
return ips
for elem in addr_info:
ip = elem[4]
if ip not in ips:
ips.append(ip)
# TODO remove this temp hack that works around the case where
# host X has more IPs than X.foo.com.
if len(host.split(".")) == 3:
try:
ips.extend(get_host_ips(host.split(".")[0]))
except Exception, ex:
pass
return ips
except Exception, e:
raise MongoctlException("Invalid host '%s'. Cause: %s" % (host, e))
###############################################################################
def resolve_class(kls):
if kls == "dict":
return dict
try:
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__( module )
for comp in parts[1:]:
m = getattr(m, comp)
return m
except Exception, e:
raise Exception("Cannot resolve class '%s'. Cause: %s" % (kls, e))
###############################################################################
def download_url(url, destination=None, show_errors=False):
destination = destination or os.getcwd()
log_info("Downloading %s..." % url)
if which("curl"):
download_cmd = ['curl', '-O', '-L']
if show_errors:
download_cmd.append('-Ss')
elif which("wget"):
download_cmd = ['wget']
else:
msg = ("Cannot download file.You need to have 'curl' or 'wget"
"' command in your path in order to proceed.")
raise MongoctlException(msg)
download_cmd.append(url)
call_command(download_cmd, cwd=destination)
file_name = url.split("/")[-1]
return os.path.join(destination, file_name)
###############################################################################
def extract_archive(archive_name):
log_info("Extracting %s..." % archive_name)
if not which("tar"):
msg = ("Cannot extract archive.You need to have 'tar' command in your"
" path in order to proceed.")
raise MongoctlException(msg)
dir_name = archive_name.replace(".tgz", "").replace(".tar.gz", "")
ensure_dir(dir_name)
tar_cmd = ['tar', 'xvf', archive_name, "-C", dir_name,
"--strip-components", "1"]
call_command(tar_cmd)
return dir_name
###############################################################################
def validate_openssl():
"""
Validates OpenSSL to ensure it has TLS_FALLBACK_SCSV supported
"""
try:
open_ssl_exe = which("openssl")
if not open_ssl_exe:
raise Exception("No openssl exe found in path")
try:
            # execute an invalid command to get output with available options
# since openssl does not have a --help option unfortunately
execute_command([open_ssl_exe, "s_client", "invalidDummyCommand"])
except subprocess.CalledProcessError as e:
if "fallback_scsv" not in e.output:
raise Exception("openssl does not support TLS_FALLBACK_SCSV")
except Exception as e:
raise MongoctlException("Unsupported OpenSSL. %s" % e)
###############################################################################
def time_string(time_seconds):
days, remainder = divmod(time_seconds, 3600 * 24)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
result = []
if days:
result.append("%d day(s)" % days)
if days or hours:
result.append("%d hour(s)" % hours)
if days or hours or minutes:
result.append("%d minute(s)" % minutes)
result.append("%d second(s)" % seconds)
return " ".join(result)
###############################################################################
def system_memory_size_gbs():
"""
credit https://stackoverflow.com/questions/22102999/get-total-physical-memory-in-python
:return: total system memory size in gbs
"""
mem_bytes = psutil.virtual_memory().total
return mem_bytes/(1024.**3)
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/utils.py",
"copies": "1",
"size": "13621",
"license": "mit",
"hash": 5686721982741939000,
"line_mean": 31.9009661836,
"line_max": 91,
"alpha_frac": 0.4648704207,
"autogenerated": false,
"ratio": 4.741037243299687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005700062638723354,
"num_lines": 414
} |
__author__ = 'abdul'
import os
import subprocess
import pwd
import time
import socket
import urlparse
import json
from bson import json_util
from mongoctl_logging import *
from errors import MongoctlException
###############################################################################
def namespace_get_property(namespace, name):
if hasattr(namespace, name):
return getattr(namespace,name)
return None
###############################################################################
def to_string(thing):
return "" if thing is None else str(thing)
###############################################################################
def document_pretty_string(document):
return json.dumps(document, indent=4, default=json_util.default)
###############################################################################
def listify(object):
if isinstance(object, list):
return object
return [object]
###############################################################################
def is_url(value):
scheme = urlparse.urlparse(value).scheme
return scheme is not None and scheme != ''
###############################################################################
def wait_for(predicate, timeout=None, sleep_duration=2, grace=True):
start_time = now()
must_retry = may_retry = not predicate()
if must_retry and grace:
# optimizing for predicates whose first invocations may be slooooooow
log_verbose("GRACE: First eval finished in %d secs - resetting timer." %
(now() - start_time))
start_time = now()
while must_retry and may_retry:
must_retry = not predicate()
if must_retry:
net_time = now() - start_time
if timeout and net_time + sleep_duration > timeout:
may_retry = False
else:
left = "[-%d sec] " % (timeout - net_time) if timeout else ""
log_info("-- waiting %s--" % left)
time.sleep(sleep_duration)
return not must_retry
###############################################################################
def now():
return time.time()
###############################################################################
# OS Functions
###############################################################################
def which(program):
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
###############################################################################
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
###############################################################################
def ensure_dir(dir_path):
"""
If DIR_PATH does not exist, makes it. Failing that, raises Exception.
Returns True if dir already existed; False if it had to be made.
"""
exists = dir_exists(dir_path)
if not exists:
try:
os.makedirs(dir_path)
except(Exception,RuntimeError), e:
raise Exception("Unable to create directory %s. Cause %s" %
(dir_path, e))
return exists
###############################################################################
def dir_exists(path):
return os.path.exists(path) and os.path.isdir(path)
###############################################################################
def resolve_path(path):
# handle file uris
path = path.replace("file://", "")
# expand vars
path = os.path.expandvars(custom_expanduser(path))
# Turn relative paths to absolute
try:
path = os.path.abspath(path)
except OSError, e:
# handle the case where cwd does not exist
if "No such file or directory" in str(e):
pass
else:
raise
return path
###############################################################################
def custom_expanduser(path):
if path.startswith("~"):
login = get_current_login()
home_dir = os.path.expanduser( "~%s" % login)
path = path.replace("~", home_dir, 1)
return path
###############################################################################
def get_current_login():
try:
pwuid = pwd.getpwuid(os.geteuid())
return pwuid.pw_name
except Exception, e:
raise Exception("Error while trying to get current os login. %s" % e)
###############################################################################
# sub-processing functions
###############################################################################
def call_command(command, bubble_exit_code=False):
try:
return subprocess.check_call(command)
except subprocess.CalledProcessError, e:
if bubble_exit_code:
exit(e.returncode)
else:
raise e
###############################################################################
def execute_command(command):
    # Python 2.7+ : Use the new method because I think it's better
if hasattr(subprocess, 'check_output'):
return subprocess.check_output(command,stderr=subprocess.STDOUT)
else: # Python 2.6 compatible, check_output is not available in 2.6
return subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
###############################################################################
def is_pid_alive(pid):
try:
os.kill(pid, 0)
return True
except OSError:
return False
###############################################################################
def kill_process(pid, force=False):
signal = 9 if force else 1
try:
os.kill(pid, signal)
return True
except OSError:
return False
###############################################################################
# HELPER functions
###############################################################################
def timedelta_total_seconds(td):
"""
Equivalent python 2.7+ timedelta.total_seconds()
    This was added for python 2.6 compatibility
"""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
###############################################################################
def is_valid_member_address(address):
if address is None:
return False
host_port = address.split(":")
return (len(host_port) == 2
and host_port[0]
and host_port[1]
and str(host_port[1]).isdigit())
###############################################################################
# Network Utils Functions
###############################################################################
def is_host_local(host):
if (host == "localhost" or
host == "127.0.0.1"):
return True
return is_same_host(socket.gethostname(), host)
###############################################################################
def is_same_host(host1, host2):
"""
Returns true if host1 == host2 OR map to the same host (using DNS)
"""
if host1 == host2:
return True
else:
ips1 = get_host_ips(host1)
ips2 = get_host_ips(host2)
return len(set(ips1) & set(ips2)) > 0
###############################################################################
def is_same_address(addr1, addr2):
"""
Where the two addresses are in the host:port
Returns true if ports are equals and hosts are the same using is_same_host
"""
hostport1 = addr1.split(":")
hostport2 = addr2.split(":")
return (is_same_host(hostport1[0], hostport2[0]) and
hostport1[1] == hostport2[1])
###############################################################################
def get_host_ips(host):
try:
ips = []
addr_info = socket.getaddrinfo(host, None)
for elem in addr_info:
ip = elem[4]
if ip not in ips:
ips.append(ip)
# TODO remove this temp hack that works around the case where
# host X has more IPs than X.foo.com.
if len(host.split(".")) == 3:
try:
ips.extend(get_host_ips(host.split(".")[0]))
except Exception, ex:
pass
return ips
except Exception, e:
raise MongoctlException("Invalid host '%s'. Cause: %s" % (host, e))
###############################################################################
def resolve_class(kls):
if kls == "dict":
return dict
try:
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__( module )
for comp in parts[1:]:
m = getattr(m, comp)
return m
except Exception, e:
raise Exception("Cannot resolve class '%s'. Cause: %s" % (kls, e)) | {
"repo_name": "richardxx/mongoctl-service",
"path": "build/lib.linux-x86_64-2.7/mongoctl/utils.py",
"copies": "2",
"size": "9428",
"license": "mit",
"hash": 7165811512469557000,
"line_mean": 30.9627118644,
"line_max": 80,
"alpha_frac": 0.4411327959,
"autogenerated": false,
"ratio": 4.862300154718927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6303432950618927,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import os
from mongoctl.mongoctl_logging import log_info, log_error, log_exception
from mongoctl.errors import MongoctlException
from mongoctl.utils import call_command, which
from mongoctl.binary_repo import (
get_binary_repository, S3MongoDBBinaryRepository
)
from mongoctl.mongodb_version import make_version_info, MongoDBEdition
from mongoctl.commands.command_utils import get_mongo_installation
###############################################################################
# push to repo command
###############################################################################
def publish_mongodb_command(parsed_options):
push_mongodb(parsed_options.repo,
parsed_options.version,
mongodb_edition=parsed_options.edition,
access_key=parsed_options.accessKey,
secret_key=parsed_options.secretKey)
###############################################################################
# push_mongodb
###############################################################################
def push_mongodb(repo_name, mongodb_version, mongodb_edition=None,
access_key=None, secret_key=None):
"""
:param repo_name:
:param mongodb_version:
:param mongodb_edition:
:return:
"""
mongodb_edition = mongodb_edition or MongoDBEdition.COMMUNITY
repo = get_binary_repository(repo_name)
if access_key and isinstance(repo, S3MongoDBBinaryRepository):
repo.access_key = access_key
repo.secret_key = secret_key
repo.validate()
version_info = make_version_info(mongodb_version, mongodb_edition)
mongodb_install_dir = get_mongo_installation(version_info)
if not mongodb_install_dir:
raise MongoctlException("No mongodb installation found for '%s'" %
version_info)
mongodb_install_home = os.path.dirname(mongodb_install_dir)
target_archive_name = repo.get_archive_name(mongodb_version,
mongodb_edition)
target_archive_path = os.path.join(mongodb_install_home,
target_archive_name)
mongodb_install_dir_name = os.path.basename(mongodb_install_dir)
log_info("Taring MongoDB at '%s'" % mongodb_install_dir_name)
tar_exe = which("tar")
tar_cmd = [tar_exe, "-cvzf", target_archive_name, mongodb_install_dir_name]
call_command(tar_cmd, cwd=mongodb_install_home)
log_info("Uploading tar to repo")
repo.upload_file(mongodb_version, mongodb_edition, target_archive_path)
# cleanup
log_info("Cleanup")
try:
os.remove(target_archive_path)
except Exception, e:
log_error(str(e))
log_exception(e)
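# Hypothetical usage sketch (repository name, version and keys below are
# illustrative placeholders only):
#
#   push_mongodb("my-s3-repo", "3.0.5",
#                mongodb_edition=MongoDBEdition.COMMUNITY,
#                access_key="<aws-access-key>",
#                secret_key="<aws-secret-key>")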
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/commands/misc/publish_mongodb.py",
"copies": "1",
"size": "2777",
"license": "mit",
"hash": -878568143732092500,
"line_mean": 29.1847826087,
"line_max": 79,
"alpha_frac": 0.5851638459,
"autogenerated": false,
"ratio": 4.048104956268221,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01161301734083207,
"num_lines": 92
} |